dash_buffer.py
|
from __future__ import division
import Queue
import threading
import time
import csv
import os
import config_dash
from stop_watch import StopWatch
# Durations in seconds
PLAYER_STATES = ['INITIALIZED', 'INITIAL_BUFFERING', 'PLAY',
'PAUSE', 'BUFFERING', 'STOP', 'END']
EXIT_STATES = ['STOP', 'END']
class DashPlayer:
""" DASH buffer class """
def __init__(self, video_length, segment_duration):
config_dash.LOG.info("Initializing the Buffer")
self.player_thread = None
self.playback_start_time = None
self.playback_duration = video_length
self.segment_duration = segment_duration
#print "video_length = {}".format(video_length)
#print "segment_duration = {}".format(segment_duration)
# Timers to keep track of playback time and the actual time
self.playback_timer = StopWatch()
self.actual_start_time = None
# Playback State
self.playback_state = "INITIALIZED"
self.playback_state_lock = threading.Lock()
# Buffer size
if config_dash.MAX_BUFFER_SIZE:
self.max_buffer_size = config_dash.MAX_BUFFER_SIZE
else:
self.max_buffer_size = video_length
# Duration of the current buffer
self.buffer_length = 0
self.buffer_length_lock = threading.Lock()
# Buffer Constants
self.initial_buffer = config_dash.INITIAL_BUFFERING_COUNT
self.alpha = config_dash.ALPHA_BUFFER_COUNT
self.beta = config_dash.BETA_BUFFER_COUNT
self.segment_limit = None
# Current video buffer that holds the segment data
self.buffer = Queue.Queue()
self.buffer_lock = threading.Lock()
self.current_segment = None
self.buffer_log_file = config_dash.BUFFER_LOG_FILENAME
config_dash.LOG.info("VideoLength={},segmentDuration={},MaxBufferSize={},InitialBuffer(secs)={},"
"BufferAlph(secs)={},BufferBeta(secs)={}".format(self.playback_duration,
self.segment_duration,
self.max_buffer_size, self.initial_buffer,
self.alpha, self.beta))
def set_state(self, state):
""" Function to set the state of the player"""
state = state.upper()
if state in PLAYER_STATES:
self.playback_state_lock.acquire()
config_dash.LOG.info("Changing state from {} to {} at {} Playback time ".format(self.playback_state, state,
self.playback_timer.time()))
self.playback_state = state
self.playback_state_lock.release()
else:
config_dash.LOG.error("Unidentified state: {}".format(state))
def initialize_player(self):
"""Method that update the current playback time"""
start_time = time.time()
initial_wait = 0
paused = False
buffering = False
interruption_start = None
config_dash.LOG.info("Initialized player with video length {}".format(self.playback_duration))
while True:
# Video playback has finished
if self.playback_state == "END":
config_dash.LOG.info("Finished playback of the video: {} seconds of video played for {} seconds".format(
self.playback_duration, time.time() - start_time))
config_dash.JSON_HANDLE['playback_info']['end_time'] = time.time()
self.playback_timer.pause()
return "STOPPED"
if self.playback_state == "STOP":
# If video is stopped quit updating the playback time and exit player
config_dash.LOG.info("Player Stopped at time {}".format(
time.time() - start_time))
config_dash.JSON_HANDLE['playback_info']['end_time'] = time.time()
self.playback_timer.pause()
self.log_entry("Stopped")
return "STOPPED"
# If paused by user
if self.playback_state == "PAUSE":
if not paused:
# do not update the playback time. Wait for the state to change
config_dash.LOG.info("Player Paused after {:4.2f} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
paused = True
continue
# If the playback encounters buffering during the playback
if self.playback_state == "BUFFERING":
if not buffering:
config_dash.LOG.info("Entering buffering stage after {} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
buffering = True
interruption_start = time.time()
config_dash.JSON_HANDLE['playback_info']['interruptions']['count'] += 1
# Resume playback once the buffer holds at least RE_BUFFERING_COUNT segments
else:
# If RE_BUFFERING_COUNT segments exceed the remaining length of the video, do not wait for a full refill
remaining_playback_time = self.playback_duration - self.playback_timer.time()
if ((self.buffer.qsize() >= config_dash.RE_BUFFERING_COUNT) or (
config_dash.RE_BUFFERING_COUNT * self.segment_duration >= remaining_playback_time
and self.buffer.qsize() > 0)):
buffering = False
if interruption_start:
interruption_end = time.time()
interruption = interruption_end - interruption_start
config_dash.JSON_HANDLE['playback_info']['interruptions']['events'].append(
(interruption_start, interruption_end))
config_dash.JSON_HANDLE['playback_info']['interruptions']['total_duration'] += interruption
config_dash.LOG.info("Duration of interruption = {}".format(interruption))
interruption_start = None
self.set_state("PLAY")
self.log_entry("Buffering-Play")
if self.playback_state == "INITIAL_BUFFERING":
if self.buffer.qsize() < config_dash.INITIAL_BUFFERING_COUNT:
initial_wait = time.time() - start_time
continue
else:
config_dash.LOG.info("Initial Waiting Time = {}".format(initial_wait))
config_dash.JSON_HANDLE['playback_info']['initial_buffering_duration'] = initial_wait
config_dash.JSON_HANDLE['playback_info']['start_time'] = time.time()
self.set_state("PLAY")
self.log_entry("InitialBuffering-Play")
if self.playback_state == "PLAY":
# Check if the buffer has any segments
if self.playback_timer.time() == self.playback_duration:
self.set_state("END")
self.log_entry("Play-End")
if self.buffer.qsize() == 0:
config_dash.LOG.info("Buffer empty after {} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
self.set_state("BUFFERING")
self.log_entry("Play-Buffering")
continue
# Read one segment from the buffer
# Acquire the lock on the buffer and read a segment from it
self.buffer_lock.acquire()
play_segment = self.buffer.get()
self.buffer_lock.release()
config_dash.LOG.info("Reading the segment number {} from the buffer at playtime {}".format(
play_segment['segment_number'], self.playback_timer.time()))
self.log_entry(action="StillPlaying", bitrate=play_segment["bitrate"])
# Calculate the playback time at which the segment finishes
future = self.playback_timer.time() + play_segment['playback_length']
# Start the playback
self.playback_timer.start()
while self.playback_timer.time() < future:
# If playback hasn't started yet, set the playback_start_time
if not self.playback_start_time:
self.playback_start_time = time.time()
config_dash.LOG.info("Started playing with representation {} at {}".format(
play_segment['bitrate'], self.playback_timer.time()))
# Duration for which the video was played in seconds (integer)
if self.playback_timer.time() >= self.playback_duration:
config_dash.LOG.info("Completed the video playback: {} seconds".format(
self.playback_duration))
self.playback_timer.pause()
self.set_state("END")
self.log_entry("TheEnd")
return
else:
self.buffer_length_lock.acquire()
self.buffer_length -= int(play_segment['playback_length'])
config_dash.LOG.debug("Decrementing buffer_length by {}. dash_buffer = {}".format(
play_segment['playback_length'], self.buffer_length))
self.buffer_length_lock.release()
if self.segment_limit:
if int(play_segment['segment_number']) >= self.segment_limit:
self.set_state("STOP")
config_dash.LOG.info("Stopped playback after segment {} at playtime {}".format(
play_segment['segment_number'], self.playback_duration))
def write(self, segment):
""" write segment to the buffer.
Segment is dict with keys ['data', 'bitrate', 'playback_length', 'URI', 'size']
"""
# Acquire Lock on the buffer and add a segment to it
if not self.actual_start_time:
self.actual_start_time = time.time()
config_dash.JSON_HANDLE['playback_info']['start_time'] = self.actual_start_time
config_dash.LOG.info("Writing segment {} at time {}".format(segment['segment_number'],
time.time() - self.actual_start_time))
self.buffer_lock.acquire()
self.buffer.put(segment)
self.buffer_lock.release()
self.buffer_length_lock.acquire()
self.buffer_length += int(segment['playback_length'])
config_dash.LOG.debug("Incrementing buffer_length by {}. dash_buffer = {}".format(
segment['playback_length'], self.buffer_length))
self.buffer_length_lock.release()
self.log_entry(action="Writing", bitrate=segment['bitrate'])
def start(self):
""" Start playback"""
self.set_state("INITIAL_BUFFERING")
self.log_entry("Starting")
config_dash.LOG.info("Starting the Player")
self.player_thread = threading.Thread(target=self.initialize_player)
self.player_thread.daemon = True
self.player_thread.start()
self.log_entry(action="Starting")
def stop(self):
"""Method to stop the playback"""
self.set_state("STOP")
self.log_entry("Stopped")
config_dash.LOG.info("Stopped the playback")
def log_entry(self, action, bitrate=0):
"""Method to log the current state"""
if self.buffer_log_file:
header_row = None
if self.actual_start_time:
log_time = time.time() - self.actual_start_time
else:
log_time = 0
if not os.path.exists(self.buffer_log_file):
header_row = "EpochTime,CurrentPlaybackTime,CurrentBufferSize,CurrentPlaybackState,Action,Bitrate".split(",")
stats = (log_time, str(self.playback_timer.time()), self.buffer.qsize(),
self.playback_state, action,bitrate)
else:
stats = (log_time, str(self.playback_timer.time()), self.buffer.qsize(),
self.playback_state, action,bitrate)
str_stats = [str(i) for i in stats]
with open(self.buffer_log_file, "ab") as log_file_handle:
result_writer = csv.writer(log_file_handle, delimiter=",")
if header_row:
result_writer.writerow(header_row)
result_writer.writerow(str_stats)
config_dash.LOG.info("BufferStats: EpochTime=%s,CurrentPlaybackTime=%s,CurrentBufferSize=%s,"
"CurrentPlaybackState=%s,Action=%s,Bitrate=%s" % tuple(str_stats))
|
helpers.py
|
# All helpers method here
import time
import logging
import threading
from .iv import InformationValue
from ipywidgets.widgets import FloatProgress
# from IPython.display import display
logger = logging.getLogger(__name__)
class Queue:
def put(self, item):
self.item = item
def get(self):
return self.item
def work(progress, q):
total = 100
for i in range(total):
end = q.get()
if end:
progress.value = 1
return
elif i <= (total - 20):
time.sleep(0.07)
# time.sleep(0.75)
progress.value = float(i + 1) / total
else:
# don't let this thread finish before the main one does
logger.debug("going to while loop inside work")
inc = i
while True:
end = q.get()
if end:
logger.debug(
"setting progess value to 0.98 and coming out")
progress.value = 1
# time.sleep(0.1)
return
else:
logger.debug("inside else, worker")
# slowly increase time
if progress.value < 0.95:
time.sleep(1)
progress.value = float(inc + 1) / total
inc += 1
logger.debug(
"check condition; new progress value: %s" % progress.value)
logger.debug("i: %s" % i)
logger.debug("out of for loop in work")
return
def progressbar(func):
def wrapper(*args, **kwargs):
q = Queue()
que = Queue()
q.put(False)
progress = FloatProgress(value=0.0, min=0.0, max=1.0)
pbar_thread = threading.Thread(target=work, args=(progress, q))
worker = threading.Thread(target=lambda q, *args, **kwargs: q.put(func(*args, **kwargs)),
args=(que, *args), kwargs=kwargs)
pbar_thread.start()
worker.start()
index = kwargs['index']
children = kwargs['children']
parent = kwargs['parent']
# first set the progress bar as the child
child = progress # wrapper(*args, **kwargs)
children.pop(index)
children.insert(index, child)
parent.children = children
# wait for worker thread to complete
logger.debug("waiting for the worker, with progressbar")
worker.join()
q.put(True)
logger.debug("done waiting for the worker.")
time.sleep(0.1)
# assign the real child in place of the progress bar
child = que.get()
children.pop(index)
children.insert(index, child)
parent.children = children
return
return wrapper
def infvalue(func):
logger.debug("invoking InformationValue object")
iv = InformationValue()
logger.debug("InformationValue object created")
def preds_predpower(*args, **kwargs):
logger.debug("calling get_iv_scores method on InformationValue object")
preds = iv.get_iv_scores(*args, **kwargs)
logger.debug("get_iv_scores succeded, got all predictors")
preds = preds.to_numpy()
return preds
return preds_predpower
def color_lt(x, threshold, color):
if x > threshold:
color = color
else:
color = ''
return 'color: %s' % color
def highlight_bg_gtr(x, threshold, color):
if x > threshold:
color = color # '#FF4500'
else:
color = ''
return 'background-color:%s' % color
def highlight_bg_ltr(x, threshold, color):
if x <= threshold:
color = color # '#FF4500'
else:
color = ''
return 'background-color:%s' % color
def apply_thresh_style(df):
return df.style.\
applymap(lambda x: highlight_bg_ltr(x, 0, 'lawngreen'), subset=['missing']).\
applymap(lambda x: color_lt(x, 0, 'blue'),
subset=['missing']).\
applymap(lambda x: highlight_bg_gtr(
x, 0, 'orange'), subset=['missing'])
@infvalue
def predictors(df, target):
pass
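# --- Usage sketch for @progressbar (illustrative, assumes a Jupyter/ipywidgets context) ---
# The decorator swaps a FloatProgress widget into parent.children[index] while
# the wrapped function runs in a worker thread, then replaces it with the
# function's return value (expected to be a widget). The names below
# (_progressbar_demo, slow_widget, container) are hypothetical.
def _progressbar_demo():
    from ipywidgets.widgets import VBox, HTML

    @progressbar
    def slow_widget(*args, **kwargs):
        time.sleep(2)                 # simulate slow work
        return HTML("<b>done</b>")    # widget that replaces the progress bar

    container = VBox([HTML("placeholder")])
    slow_widget(index=0, children=list(container.children), parent=container)
    return container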
|
contandoThreadsAtivas.py
|
# COUNTING ACTIVE THREADS
from concurrent.futures import thread
import threading
import time
import random
def minhaThread(i):
print("Thread {}: inicializada".format(i))
time.sleep(random.randint(1,5))
print("\nThread {}: finalizada".format(i))
for i in range(random.randint(2,50)):
thread=threading.Thread(target=minhaThread,args=(i, ))
thread.start()
time.sleep(4)
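# active_count() includes the main thread plus any worker threads still
# sleeping (each one sleeps between 1 and 5 seconds)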
print("Total de Threads ativas: {}".format(threading.active_count()))
|
pretrained.py
|
# Copyright 2017-2022 John Snow Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes for the :class:`PretrainedPipeline` and downloading Pretrained Models.
"""
import sys
import sparknlp.internal as _internal
import threading
import time
from pyspark.sql import DataFrame
from sparknlp.annotator import *
from sparknlp.base import LightPipeline
from pyspark.ml import PipelineModel
from py4j.protocol import Py4JJavaError
def printProgress(stop):
states = [' | ', ' / ', ' — ', ' \\ ']
nextc = 0
while True:
sys.stdout.write('\r[{}]'.format(states[nextc]))
sys.stdout.flush()
time.sleep(2.5)
nextc = nextc + 1 if nextc < 3 else 0
if stop():
sys.stdout.write('\r[{}]'.format('OK!'))
sys.stdout.flush()
break
sys.stdout.write('\n')
return
class ResourceDownloader(object):
@staticmethod
def downloadModel(reader, name, language, remote_loc=None, j_dwn='PythonResourceDownloader'):
print(name + " download started this may take some time.")
file_size = _internal._GetResourceSize(name, language, remote_loc).apply()
if file_size == "-1":
print("Can not find the model to download please check the name!")
else:
print("Approximate size to download " + file_size)
stop_threads = False
t1 = threading.Thread(target=printProgress, args=(lambda: stop_threads,))
t1.start()
try:
j_obj = _internal._DownloadModel(reader.name, name, language, remote_loc, j_dwn).apply()
except Py4JJavaError as e:
sys.stdout.write("\n" + str(e))
raise e
finally:
stop_threads = True
t1.join()
return reader(classname=None, java_model=j_obj)
@staticmethod
def downloadModelDirectly(name, remote_loc="public/models"):
_internal._DownloadModelDirectly(name, remote_loc).apply()
@staticmethod
def downloadPipeline(name, language, remote_loc=None):
print(name + " download started this may take some time.")
file_size = _internal._GetResourceSize(name, language, remote_loc).apply()
if file_size == "-1":
print("Can not find the model to download please check the name!")
else:
print("Approx size to download " + file_size)
stop_threads = False
t1 = threading.Thread(target=printProgress, args=(lambda: stop_threads,))
t1.start()
try:
j_obj = _internal._DownloadPipeline(name, language, remote_loc).apply()
jmodel = PipelineModel._from_java(j_obj)
finally:
stop_threads = True
t1.join()
return jmodel
@staticmethod
def clearCache(name, language, remote_loc=None):
_internal._ClearCache(name, language, remote_loc).apply()
@staticmethod
def showPublicModels(annotator=None, lang=None, version=None):
print(_internal._ShowPublicModels(annotator, lang, version).apply())
@staticmethod
def showPublicPipelines(lang=None, version=None):
print(_internal._ShowPublicPipelines(lang, version).apply())
@staticmethod
def showUnCategorizedResources():
print(_internal._ShowUnCategorizedResources().apply())
@staticmethod
def showAvailableAnnotators():
print(_internal._ShowAvailableAnnotators().apply())
class PretrainedPipeline:
"""Loads a Represents a fully constructed and trained Spark NLP pipeline,
ready to be used.
This way, a whole pipeline can be defined in 1 line. Additionally, the
:class:`.LightPipeline` version of the model can be retrieved with member
:attr:`.light_model`.
For more extended examples see the `Pipelines page
<https://nlp.johnsnowlabs.com/docs/en/pipelines>`_ and our `Github Model
Repository <https://github.com/JohnSnowLabs/spark-nlp-models>`_ for
available pipeline models.
Parameters
----------
name : str
Name of the PretrainedPipeline. These can be gathered from the Pipelines
Page.
lang : str, optional
Language of the model, by default 'en'
remote_loc : str, optional
Link to the remote location of the model (if it was already downloaded),
by default None
parse_embeddings : bool, optional
Whether to parse embeddings, by default False
disk_location : str , optional
Path to locally stored PretrainedPipeline, by default None
"""
def __init__(self, name, lang='en', remote_loc=None, parse_embeddings=False, disk_location=None):
if not disk_location:
self.model = ResourceDownloader().downloadPipeline(name, lang, remote_loc)
else:
self.model = PipelineModel.load(disk_location)
self.light_model = LightPipeline(self.model, parse_embeddings)
@staticmethod
def from_disk(path, parse_embeddings=False):
return PretrainedPipeline(None, None, None, parse_embeddings, path)
def annotate(self, target, column=None):
"""Annotates the data provided, extracting the results.
The data should be a str, a list of strings, or a Spark DataFrame (pass ``column`` for DataFrames).
Parameters
----------
target : list or str
The data to be annotated
Returns
-------
List[dict] or dict
The result of the annotation
Examples
--------
>>> from sparknlp.pretrained import PretrainedPipeline
>>> explain_document_pipeline = PretrainedPipeline("explain_document_dl")
>>> result = explain_document_pipeline.annotate('U.N. official Ekeus heads for Baghdad.')
>>> result.keys()
dict_keys(['entities', 'stem', 'checked', 'lemma', 'document', 'pos', 'token', 'ner', 'embeddings', 'sentence'])
>>> result["ner"]
['B-ORG', 'O', 'O', 'B-PER', 'O', 'O', 'B-LOC', 'O']
"""
if type(target) is DataFrame:
if not column:
raise Exception("annotate() column arg needed when targeting a DataFrame")
return self.model.transform(target.withColumnRenamed(column, "text"))
elif type(target) is list or type(target) is str:
pipeline = self.light_model
return pipeline.annotate(target)
else:
raise Exception("target must be either a spark DataFrame, a list of strings or a string")
def fullAnnotate(self, target, column=None):
"""Annotates the data provided into `Annotation` type results.
The data should be a str, a list of strings, or a Spark DataFrame (pass ``column`` for DataFrames).
Parameters
----------
target : list or str
The data to be annotated
Returns
-------
List[Annotation]
The result of the annotation
Examples
--------
>>> from sparknlp.pretrained import PretrainedPipeline
>>> explain_document_pipeline = PretrainedPipeline("explain_document_dl")
>>> result = explain_document_pipeline.fullAnnotate('U.N. official Ekeus heads for Baghdad.')
>>> result[0].keys()
dict_keys(['entities', 'stem', 'checked', 'lemma', 'document', 'pos', 'token', 'ner', 'embeddings', 'sentence'])
>>> result[0]["ner"]
[Annotation(named_entity, 0, 2, B-ORG, {'word': 'U.N'}),
Annotation(named_entity, 3, 3, O, {'word': '.'}),
Annotation(named_entity, 5, 12, O, {'word': 'official'}),
Annotation(named_entity, 14, 18, B-PER, {'word': 'Ekeus'}),
Annotation(named_entity, 20, 24, O, {'word': 'heads'}),
Annotation(named_entity, 26, 28, O, {'word': 'for'}),
Annotation(named_entity, 30, 36, B-LOC, {'word': 'Baghdad'}),
Annotation(named_entity, 37, 37, O, {'word': '.'})]
"""
if type(target) is DataFrame:
if not column:
raise Exception("annotate() column arg needed when targeting a DataFrame")
return self.model.transform(target.withColumnRenamed(column, "text"))
elif type(target) is list or type(target) is str:
pipeline = self.light_model
return pipeline.fullAnnotate(target)
else:
raise Exception("target must be either a spark DataFrame, a list of strings or a string")
def transform(self, data):
"""Transforms the input dataset with Spark.
Parameters
----------
data : :class:`pyspark.sql.DataFrame`
input dataset
Returns
-------
:class:`pyspark.sql.DataFrame`
transformed dataset
"""
return self.model.transform(data)
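# --- Usage sketch (illustrative; assumes Spark NLP is installed and that
# sparknlp.start() is available to create a Spark session) ---
# Shows the typical entry points defined above: listing public pipelines via
# ResourceDownloader and loading/using a PretrainedPipeline.
if __name__ == "__main__":
    import sparknlp
    spark = sparknlp.start()
    ResourceDownloader.showPublicPipelines(lang="en")
    pipeline = PretrainedPipeline("explain_document_dl", lang="en")
    print(pipeline.annotate("U.N. official Ekeus heads for Baghdad."))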
|
keep_alive_monitor.py
|
# std
import logging
import urllib.request
from datetime import datetime
from threading import Thread
from time import sleep
from typing import List
# project
from . import EventService, Event, EventType, EventPriority
class KeepAliveMonitor:
"""Runs a separate thread to monitor time passed
since the last keep-alive event was received (for all services).
If a service has stopped responding and is no longer
sending events, this class will trigger a high priority
user event and propagate it to the notifier.
There's also an option to enable pinging to a remote service
that provides a second layer of redundancy. E.g. if this monitoring
thread crashes and stops responding, the remote service will stop
receiving keep-alive ping events and can notify the user.
"""
def __init__(self, config: dict = None, thresholds: dict = None):
self._notify_manager = None
self._last_keep_alive = {EventService.HARVESTER: datetime.now()}
self._last_keep_alive_threshold_seconds = thresholds or {EventService.HARVESTER: 300}
self._ping_url = None
if config and config["enable_remote_ping"]:
self._ping_url = config["ping_url"]
logging.info(f"Enabled remote pinging to {self._ping_url}")
# Infer check period from minimum threshold (arbitrary decision)
# Note that this period defines how often high priority notifications
# will be re-triggered so < 5 min is not recommended
self._check_period = float("inf")
for threshold in self._last_keep_alive_threshold_seconds.values():
self._check_period = min(threshold, self._check_period)
logging.info(f"Keep-alive check period: {self._check_period} seconds")
if self._check_period < 300:
logging.warning(
"Check period below 5 minutes might result "
"in very frequent high priority notifications "
"in case something stops working. Is it intended?"
)
# Start thread
self._is_running = True
self._keep_alive_check_thread = Thread(target=self.check_last_keep_alive)
self._keep_alive_check_thread.start()
def set_notify_manager(self, notify_manager):
self._notify_manager = notify_manager
def check_last_keep_alive(self):
"""This function runs in separate thread in the background
and continuously checks that keep-alive events have been received
"""
last_check = datetime.now()
while self._is_running:
sleep(1) # Not sleeping entire check period so we can interrupt
if (datetime.now() - last_check).seconds < self._check_period:
continue
last_check = datetime.now()
self._ping_remote()
events = []
for service in self._last_keep_alive.keys():
seconds_since_last = (datetime.now() - self._last_keep_alive[service]).seconds
logging.debug(f"Keep-alive check for {service.name}: Last activity {seconds_since_last} seconds ago.")
if seconds_since_last > self._last_keep_alive_threshold_seconds[service]:
message = (
f"Inactive for {seconds_since_last} seconds."
)
logging.warning(message)
events.append(
Event(
type=EventType.USER,
priority=EventPriority.HIGH,
service=EventService.HARVESTER,
message=message,
)
)
if len(events):
if self._notify_manager:
self._notify_manager.process_events(events)
else:
logging.warning("Notify manager is not set - can't propagate high priority event!")
def process_events(self, events: List[Event]):
"""Update last keep alive timestamp with any new keep-alive events"""
for event in events:
if event.type == EventType.KEEPALIVE:
logging.debug(f"Received keep-alive event from {event.service.name}")
self._last_keep_alive[event.service] = datetime.now()
def _ping_remote(self):
"""Ping a remote watchdog that monitors that chiadog is alive
and hasn't crashed silently. Second level of redundancy ;-)
"""
if self._ping_url:
logging.debug("Pinging remote keep-alive endpoint")
try:
urllib.request.urlopen(self._ping_url, timeout=10)
except Exception as e:
logging.error(f"Failed to ping keep-alive: {e}")
def stop(self):
logging.info("Stopping")
self._is_running = False
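# --- Usage sketch (illustrative, not part of the original module; the helper
# name _example_wiring is hypothetical) ---
# Expected wiring: construct the monitor, attach a notify manager, feed
# keep-alive events as they arrive, and stop the background thread on
# shutdown. process_events() only inspects event.type, so the priority used
# below (HIGH, the only member referenced in this file) is not significant.
def _example_wiring(notify_manager):
    monitor = KeepAliveMonitor(
        config={"enable_remote_ping": False, "ping_url": ""},
        thresholds={EventService.HARVESTER: 300},
    )
    monitor.set_notify_manager(notify_manager)
    monitor.process_events([
        Event(
            type=EventType.KEEPALIVE,
            priority=EventPriority.HIGH,
            service=EventService.HARVESTER,
            message="keep-alive",
        )
    ])
    monitor.stop()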
|
threadDemo.py
|
#!python3
#
import threading, time
print("Start of program.")
def takeANap():
time.sleep(5)
print("Wake up!")
threadObj = threading.Thread(target=takeANap)
threadObj.start()
print("End of program.")
|
inf_ctr_mod.py
|
import time, threading, sys
def counter(c_var):
while True:
c_var = c_var + 1
#print(c_var)
if __name__ == "__main__":
c = 0
ct = threading.Thread(target = counter, args = (c,))
ct.start()
print("Counter started.")
time.sleep(2)
print(c)
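# prints 0: the int is passed to the thread by value, so counter()'s
# increments to its local c_var never affect this c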
sys.exit()
|
034.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Multithreading
'''
import time, threading
# Code executed by the new thread:
def loop():
print('thread %s is running...' % threading.current_thread().name)
n = 0
while n < 5:
n = n + 1
print('thread %s >>> %s' % (threading.current_thread().name, n))
time.sleep(1)
print('thread %s ended.' % threading.current_thread().name)
print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
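# join() blocks the main thread until LoopThread finishes, so the final
# "ended" message below always prints last.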
print('thread %s ended.' % threading.current_thread().name)
|
train_sampling_unsupervised.py
|
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
from _thread import start_new_thread
from functools import wraps
from dgl.data import RedditDataset
from torch.nn.parallel import DistributedDataParallel
import tqdm
import traceback
import sklearn.linear_model as lm
import sklearn.metrics as skm
from utils import thread_wrapped_func
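# Degree-biased negative sampler: destination nodes are drawn with probability
# proportional to in_degree ** 0.75 (the word2vec-style unigram heuristic).
# With neg_share, the same negatives are shared across the positives in a
# batch whenever the batch size is divisible by k.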
class NegativeSampler(object):
def __init__(self, g, k, neg_share=False):
self.weights = g.in_degrees().float() ** 0.75
self.k = k
self.neg_share = neg_share
def __call__(self, g, eids):
src, _ = g.find_edges(eids)
n = len(src)
if self.neg_share and n % self.k == 0:
dst = self.weights.multinomial(n, replacement=True)
dst = dst.view(-1, 1, self.k).expand(-1, self.k, -1).flatten()
else:
dst = self.weights.multinomial(n*self.k, replacement=True)
src = src.repeat_interleave(self.k)
return src, dst
def load_subtensor(g, input_nodes, device):
"""
Copies the features of a set of nodes onto the GPU.
"""
batch_inputs = g.ndata['features'][input_nodes].to(device)
return batch_inputs
class SAGE(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super().__init__()
self.n_layers = n_layers
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
for i in range(1, n_layers - 1):
self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
self.dropout = nn.Dropout(dropout)
self.activation = activation
def forward(self, blocks, x):
h = x
for l, (layer, block) in enumerate(zip(self.layers, blocks)):
h = layer(block, h)
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
return h
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input of entire node set.
The inference code is written in a fashion that it could handle any number of nodes and
layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
# on each layer are of course split into batches.
# TODO: can we standardize this?
nodes = th.arange(g.number_of_nodes())
for l, layer in enumerate(self.layers):
y = th.zeros(g.number_of_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
dataloader = dgl.dataloading.NodeDataLoader(
g,
th.arange(g.number_of_nodes()),
sampler,
batch_size=args.batch_size,
shuffle=True,
drop_last=False,
num_workers=args.num_workers)
for input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):
block = blocks[0].to(device)
h = x[input_nodes].to(device)
h = layer(block, h)
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
y[output_nodes] = h.cpu()
x = y
return y
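# Unsupervised link-prediction loss: each positive and sampled negative edge is
# scored with a dot product of its endpoint embeddings, and binary
# cross-entropy is applied with labels 1 (observed edges) and 0 (negatives).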
class CrossEntropyLoss(nn.Module):
def forward(self, block_outputs, pos_graph, neg_graph):
with pos_graph.local_scope():
pos_graph.ndata['h'] = block_outputs
pos_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
pos_score = pos_graph.edata['score']
with neg_graph.local_scope():
neg_graph.ndata['h'] = block_outputs
neg_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
neg_score = neg_graph.edata['score']
score = th.cat([pos_score, neg_score])
label = th.cat([th.ones_like(pos_score), th.zeros_like(neg_score)]).long()
loss = F.binary_cross_entropy_with_logits(score, label.float())
return loss
def compute_acc(emb, labels, train_nids, val_nids, test_nids):
"""
Compute the accuracy of prediction given the labels.
"""
emb = emb.cpu().numpy()
labels = labels.cpu().numpy()
train_nids = train_nids.cpu().numpy()
train_labels = labels[train_nids]
val_nids = val_nids.cpu().numpy()
val_labels = labels[val_nids]
test_nids = test_nids.cpu().numpy()
test_labels = labels[test_nids]
emb = (emb - emb.mean(0, keepdims=True)) / emb.std(0, keepdims=True)
lr = lm.LogisticRegression(multi_class='multinomial', max_iter=10000)
lr.fit(emb[train_nids], train_labels)
pred = lr.predict(emb)
f1_micro_eval = skm.f1_score(val_labels, pred[val_nids], average='micro')
f1_micro_test = skm.f1_score(test_labels, pred[test_nids], average='micro')
return f1_micro_eval, f1_micro_test
def evaluate(model, g, inputs, labels, train_nids, val_nids, test_nids, batch_size, device):
"""
Evaluate the model on the validation set specified by ``val_mask``.
g : The entire graph.
inputs : The features of all the nodes.
labels : The labels of all the nodes.
val_mask : A 0-1 mask indicating which nodes do we actually compute the accuracy for.
batch_size : Number of nodes to compute at the same time.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
# single gpu
if isinstance(model, SAGE):
pred = model.inference(g, inputs, batch_size, device)
# multi gpu
else:
pred = model.module.inference(g, inputs, batch_size, device)
model.train()
return compute_acc(pred, labels, train_nids, val_nids, test_nids)
#### Entry point
def run(proc_id, n_gpus, args, devices, data):
# Unpack data
device = devices[proc_id]
if n_gpus > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = n_gpus
th.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=proc_id)
train_mask, val_mask, test_mask, in_feats, labels, n_classes, g = data
train_nid = th.LongTensor(np.nonzero(train_mask)).squeeze()
val_nid = th.LongTensor(np.nonzero(val_mask)).squeeze()
test_nid = th.LongTensor(np.nonzero(test_mask)).squeeze()
#train_nid = th.LongTensor(np.nonzero(train_mask)[0])
#val_nid = th.LongTensor(np.nonzero(val_mask)[0])
#test_nid = th.LongTensor(np.nonzero(test_mask)[0])
# Create PyTorch DataLoader for constructing blocks
n_edges = g.number_of_edges()
train_seeds = np.arange(n_edges)
if n_gpus > 0:
num_per_gpu = (train_seeds.shape[0] + n_gpus -1) // n_gpus
train_seeds = train_seeds[proc_id * num_per_gpu :
(proc_id + 1) * num_per_gpu \
if (proc_id + 1) * num_per_gpu < train_seeds.shape[0]
else train_seeds.shape[0]]
# Create sampler
sampler = dgl.dataloading.MultiLayerNeighborSampler(
[int(fanout) for fanout in args.fan_out.split(',')])
dataloader = dgl.dataloading.EdgeDataLoader(
g, train_seeds, sampler, exclude='reverse_id',
# For each edge with ID e in Reddit dataset, the reverse edge is e ± |E|/2.
reverse_eids=th.cat([
th.arange(n_edges // 2, n_edges),
th.arange(0, n_edges // 2)]),
negative_sampler=NegativeSampler(g, args.num_negs),
batch_size=args.batch_size,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=args.num_workers)
# Define model and optimizer
model = SAGE(in_feats, args.num_hidden, args.num_hidden, args.num_layers, F.relu, args.dropout)
model = model.to(device)
if n_gpus > 1:
model = DistributedDataParallel(model, device_ids=[device], output_device=device)
loss_fcn = CrossEntropyLoss()
loss_fcn = loss_fcn.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Training loop
avg = 0
iter_pos = []
iter_neg = []
iter_d = []
iter_t = []
best_eval_acc = 0
best_test_acc = 0
for epoch in range(args.num_epochs):
tic = time.time()
# Loop over the dataloader to sample the computation dependency graph as a list of
# blocks.
tic_step = time.time()
for step, (input_nodes, pos_graph, neg_graph, blocks) in enumerate(dataloader):
batch_inputs = load_subtensor(g, input_nodes, device)
d_step = time.time()
pos_graph = pos_graph.to(device)
neg_graph = neg_graph.to(device)
blocks = [block.int().to(device) for block in blocks]
# Compute loss and prediction
batch_pred = model(blocks, batch_inputs)
loss = loss_fcn(batch_pred, pos_graph, neg_graph)
optimizer.zero_grad()
loss.backward()
optimizer.step()
t = time.time()
pos_edges = pos_graph.number_of_edges()
neg_edges = neg_graph.number_of_edges()
iter_pos.append(pos_edges / (t - tic_step))
iter_neg.append(neg_edges / (t - tic_step))
iter_d.append(d_step - tic_step)
iter_t.append(t - d_step)
if step % args.log_every == 0:
gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
print('[{}]Epoch {:05d} | Step {:05d} | Loss {:.4f} | Speed (samples/sec) {:.4f}|{:.4f} | Load {:.4f}| train {:.4f} | GPU {:.1f} MiB'.format(
proc_id, epoch, step, loss.item(), np.mean(iter_pos[3:]), np.mean(iter_neg[3:]), np.mean(iter_d[3:]), np.mean(iter_t[3:]), gpu_mem_alloc))
tic_step = time.time()
if step % args.eval_every == 0 and proc_id == 0:
eval_acc, test_acc = evaluate(model, g, g.ndata['features'], labels, train_nid, val_nid, test_nid, args.batch_size, device)
print('Eval Acc {:.4f} Test Acc {:.4f}'.format(eval_acc, test_acc))
if eval_acc > best_eval_acc:
best_eval_acc = eval_acc
best_test_acc = test_acc
print('Best Eval Acc {:.4f} Test Acc {:.4f}'.format(best_eval_acc, best_test_acc))
if n_gpus > 1:
th.distributed.barrier()
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
def main(args, devices):
# load reddit data
data = RedditDataset(self_loop=True)
n_classes = data.num_classes
g = data[0]
features = g.ndata['feat']
in_feats = features.shape[1]
labels = g.ndata['label']
train_mask = g.ndata['train_mask']
val_mask = g.ndata['val_mask']
test_mask = g.ndata['test_mask']
g.ndata['features'] = features
# Create csr/coo/csc formats before launching training processes with multi-gpu.
# This avoids creating certain formats in each sub-process, which saves memory and CPU.
g.create_formats_()
# Pack data
data = train_mask, val_mask, test_mask, in_feats, labels, n_classes, g
n_gpus = len(devices)
if devices[0] == -1:
run(0, 0, args, ['cpu'], data)
elif n_gpus == 1:
run(0, n_gpus, args, devices, data)
else:
procs = []
for proc_id in range(n_gpus):
p = mp.Process(target=thread_wrapped_func(run),
args=(proc_id, n_gpus, args, devices, data))
p.start()
procs.append(p)
for p in procs:
p.join()
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument("--gpu", type=str, default='0',
help="GPU, can be a list of gpus for multi-gpu trianing, e.g., 0,1,2,3; -1 for CPU")
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--num-negs', type=int, default=1)
argparser.add_argument('--neg-share', default=False, action='store_true',
help="sharing neg nodes for positive nodes")
argparser.add_argument('--fan-out', type=str, default='10,25')
argparser.add_argument('--batch-size', type=int, default=10000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=1000)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--dropout', type=float, default=0.5)
argparser.add_argument('--num-workers', type=int, default=0,
help="Number of sampling processes. Use 0 for no extra process.")
args = argparser.parse_args()
devices = list(map(int, args.gpu.split(',')))
main(args, devices)
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight bitcoinprivate client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum_bitcoinprivate import keystore, simple_config
from electrum_bitcoinprivate.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_bitcoinprivate import constants
from electrum_bitcoinprivate.plugins import run_hook
from electrum_bitcoinprivate.i18n import _
from electrum_bitcoinprivate.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword)
from electrum_bitcoinprivate import Transaction
from electrum_bitcoinprivate import util, bitcoin, commands, coinchooser
from electrum_bitcoinprivate import paymentrequest
from electrum_bitcoinprivate.wallet import Multisig_Wallet, AddTransactionException
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum_bitcoinprivate.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self._old_excepthook = None
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros', 8))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-bitcoinprivate.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced, since the callback may have fired before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-bitcoinprivate Testnet" if constants.net.TESTNET else "Electrum-bitcoinprivate"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend bitcoinprivate coins with it."),
_("Make sure you own the seed phrase or the private keys, before you request bitcoinprivate coins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum-bitcoinprivate was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings" / "Preferences" are reserved menu names on macOS; use an alternative label as a workaround
tools_menu.addAction(_("Electrum-bitcoinprivate preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
#help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://github.com/BTCPrivate/electrum-bitcoinprivate"))
help_menu.addSeparator()
#help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://github.com/BTCPrivate/electrum-bitcoinprivate")).setShortcut(QKeySequence.HelpContents)
#self._auto_crash_reports = QAction(_("&Automated Crash Reports"), self, checkable=True)
#self._auto_crash_reports.setChecked(self.config.get("show_crash_reporter", default=False))
#self._auto_crash_reports.triggered.connect(self.auto_crash_reports)
#help_menu.addAction(self._auto_crash_reports)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def auto_crash_reports(self, state):
self.config.set_key("show_crash_reporter", state)
self.setup_exception_hook()
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('bitcoinprivate:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-bitcoinprivate",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum-bitcoinprivate focus is speed, with low resource usage and simplifying bitcoinprivate. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the bitcoinprivate system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/BTCPrivate/electrum-bitcoinprivate/issues\">https://github.com/BTCPrivate/electrum-bitcoinprivate/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum-bitcoinprivate (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-bitcoinprivate - " + _("Reporting Bugs"))
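# Show tray notifications for newly received transactions; three or more pending notifications are combined into a single summary message.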
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
# iterate over a copy, since notifications are removed from the list as they are shown
for tx in list(self.tx_notifications):
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-bitcoinprivate", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-bitcoinprivate", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName that remember the directory last selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return '%s sat/kB' % round(fee_rate)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'uBTCP'
if self.decimal_point == 5:
return 'mBTCP'
if self.decimal_point == 8:
return 'BTCP'
raise Exception('Unknown base unit')
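# Keep a BTCP amount edit and its fiat counterpart in sync: editing one recomputes the other from the current exchange rate, and the auto-filled field is tinted blue.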
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
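# Refresh the status bar: shows offline/synchronizing/lagging states, or the confirmed balance plus unconfirmed/unmatured amounts and optional fiat value.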
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
l.setObjectName("history_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
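# Build the Receive tab: receiving address, description and amount fields, request expiration selector, a QR preview, and the list of saved payment requests.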
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('bitcoinprivate address where the payment should be received. Note that each payment request uses a different bitcoinprivate address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding bitcoinprivate addresses.'),
_('The bitcoinprivate address never expires and will always be part of this electrum-bitcoinprivate wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
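# Sign the payment request with the configured alias when the alias address belongs to this wallet; asks for the password if the keystore is encrypted.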
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
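# Pick an unused receiving address for the new request; if none is left, non-deterministic wallets cannot create one, and deterministic wallets warn before creating a fresh address.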
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
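# Build the Send tab: pay-to, description and amount fields, fee controls (slider plus optional manual fee/fee-rate edits), preview/send/clear buttons, and the invoice list.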
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a bitcoinprivate address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a bitcoinprivate address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('bitcoinprivate transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
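# Editing the fee or the fee-rate box directly marks the other one as not user-set and deactivates the fee slider, so only one of the two settings is frozen at a time.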
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
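# Read-only label showing the estimated transaction size between the fee-rate and fee edits ("rate x size = fee").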
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_kb())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum-bitcoinprivate tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(30)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', True):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if not displayed_feerate:
# fall back to the fee actually computed for the transaction
displayed_feerate = fee // size if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size / 1000) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = round(displayed_fee * 1000 / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(feerounding)
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(bool(feerounding))
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
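# The manual fee (or fee rate) is treated as frozen while its edit box is visible, has been modified by the user, and still contains text or keyboard focus.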
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('bitcoinprivate Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid bitcoinprivate Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
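# Broadcast the transaction from a worker thread; for BIP70 payment requests, mark the invoice as paid and send the payment ACK with a refund address.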
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
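# Handle a bitcoinprivate: URI: if it references a payment request ('r' parameter, or 'name' plus 'sig'), fetch and verify it via on_pr; otherwise fill the Send tab fields directly.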
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoinprivate URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_kb())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
l.setObjectName("addresses_container")
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
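# Coins explicitly chosen via "Spend from" take precedence; otherwise all spendable coins in the wallet are used.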
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2]) + ' ' + self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
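# Status bar: balance label, a hidden search box, and buttons for password, preferences, seed and network status.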
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
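# Wallets whose storage is encrypted with a key derived from a hardware device (STO_EV_XPUB_PW) obtain the storage password from the device; software wallets prompt for old and new passwords.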
def change_password_dialog(self):
from electrum_bitcoinprivate.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid bitcoinprivate address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid bitcoinprivate address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
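# Parse a raw transaction given as text; shows a critical error dialog and returns None if it cannot be parsed.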
def tx_from_text(self, txt):
from electrum_bitcoinprivate.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum-bitcoinprivate was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_bitcoinprivate import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoinprivate URI
if str(data).startswith("bitcoinprivate:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum-bitcoinprivate was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_bitcoinprivate import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
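    # The private-key export below decrypts keys on a background thread and
    # reports progress via computing_privkeys_signal / show_privkeys_signal so
    # the dialog stays responsive; closing the dialog early sets `cancelled`
    # and stops the thread.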
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-bitcoinprivate-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum-bitcoinprivate was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
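    # sweep_key_dialog collects private keys, looks up their spendable coins
    # via sweep_preparations() and pre-fills the Send tab (destination,
    # spend_max, frozen amount fields) so the user only has to confirm the
    # sweep transaction.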
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum_bitcoinprivate.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
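    # settings_dialog builds the Preferences window: each tab collects
    # (label, widget) rows in one of the lists below (gui_widgets, fee_widgets,
    # ...), and most widgets write straight to self.config from their change
    # handlers.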
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_bitcoinprivate.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA')])
        fee_type_combo.setCurrentIndex(1 if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', False)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', True))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BTCP', 'mBTCP', 'uBTCP']
msg = (_('Base unit of your wallet.')
+ '\n1 BTCP = 1000 mBTCP. 1 mBTCP = 1000 uBTCP.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BTCP':
self.decimal_point = 8
elif unit_result == 'mBTCP':
self.decimal_point = 5
elif unit_result == 'uBTCP':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_bitcoinprivate import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum-bitcoinprivate to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
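    # plugins_dialog lists every available plugin with an enable checkbox, an
    # optional per-plugin settings widget and a help button generated from the
    # plugin's description metadata.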
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum-bitcoinprivate Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def save_transaction_into_wallet(self, tx):
try:
if not self.wallet.add_transaction(tx.txid(), tx):
self.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
self.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
self.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), _("Transaction added to wallet history"))
return True
|
test_browser.py
|
import BaseHTTPServer, multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root
from tools.shared import *
# The user can set the environment variable EMSCRIPTEN_BROWSER to make the browser test suite
# run with a browser command line other than the default system browser.
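# Hypothetical example (adjust the command line to your setup):
#   EMSCRIPTEN_BROWSER="firefox -profile /tmp/emtest" python tests/runner.py browser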
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
cmd = shlex.split(emscripten_browser)
def run_in_other_browser(url):
Popen(cmd + [url])
if EM_BUILD_VERBOSE_LEVEL >= 3:
print >> sys.stderr, "using Emscripten browser: " + str(cmd)
webbrowser.open_new = run_in_other_browser
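# Minimal HTTP server used by the chunked synchronous XHR test: it serves
# `data` either whole or as byte ranges (when support_byte_ranges is set),
# answers CORS preflight requests, and handles a fixed number of connections
# before returning.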
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum):
class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
(start, end) = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data)-1, end)
length = end-start+1
s.sendheaders([],length)
s.wfile.write(data[start:end+1])
s.wfile.close()
expectedConns = 11
httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns+1):
httpd.handle_request()
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
self.browser_timeout = 20
print
print 'Running the browser tests. Make sure the browser allows popups from localhost.'
print
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1']) # is the default anyhow
def test_html_source_map(self):
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
cwd=self.get_dir()).communicate()
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
time.sleep(1)
print '''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
'''
def test_emscripten_log(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html', '-s', 'DEMANGLE_SUPPORT=1']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
if WINDOWS and Building.which('mingw32-make'): # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
Popen(['doit.bat']).communicate()
else:
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
def make_main(path):
print 'make main at', path
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print 'Testing', srcpath, dstpath
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
    # As an Emscripten-specific feature, the character '@' must be escaped as '@@' so it is not confused with the 'src@dst' notation.
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print srcpath
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except:
pass
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
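    # checkPreloadResults() (defined in test.js above) counts how many data
    # packages were served from the preload cache. On the first page load the
    # cache is cold, so the reported result is 1 (file contents only); on the
    # second load the package comes from the cache and the result is 2.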
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, FILE_PACKAGER, os.path.join(self.get_dir(), 'somefile.data'), '--use-preload-cache', '--indexedDB-name=testdb', '--preload', os.path.join(self.get_dir(), 'somefile.txt'), '--js-output=' + os.path.join(self.get_dir(), 'somefile.js')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'test.js'), '--pre-js', 'somefile.js', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'));
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'));
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'));
os.makedirs(os.path.join(self.get_dir(), 'cdn'));
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT();
return 0;
}
'''))
def test():
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
test()
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
open(self.in_dir("data.txt"), "w").write('''data''');
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
int result = 0;
REPORT_RESULT();
return 0;
}
'''))
open(os.path.join(self.get_dir(), 'on_window_error_shell.html'), 'w').write(r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
filePackagePrefixURL: "''' + assetLocalization + r'''",
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>'''
)
def test():
      # test that a missing file makes xhr.onload run with a status other than 200, 304 or 206
setup("");
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
shutil.move('test.data','missing.data');
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://");
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/");
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html']).communicate()
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for filePackagePrefixURL
#open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http:/localhost:8888/cdn/", '))
#test()
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'], also_proxied=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt'], manual_reference=True, post_build=self.post_manual_reftest)
def test_glgears_proxy(self):
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
    # run normally (duplicates the above test, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
original = open('test.js').read()
def copy(to, js_mod, html_mod = lambda x: x):
open(to + '.html', 'w').write(html_mod(open('test.html').read().replace('test.js', to + '.js')))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
copy('two', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'))
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'),
lambda original: original.replace('function doReftest() {', 'function doReftest() { return; ')) # don't reftest on main thread, it would race
self.run_browser('three.html', None, ['/report_result?999'])
copy('three', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'))
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1']).communicate()
open('test.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('{{{ SCRIPT }}}', '<script src="test.js"></script>'))
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', "SAFE_HEAP=1"]
]:
print delay, defines, emterps
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
//Module.print('push keydown');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
//Module.print('push keyup');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1'], manual_reference=True, post_build=post)
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keypress(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
return document.dispatchEvent(event);
}
function sendKey(c) {
          // Simulate sending the keypress event only when the preceding
          // keydown event was not default-prevented.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
      // Send 'a'. The keypress event is only sent when the preceding
      // keydown event was not default-prevented.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-s', 'NO_EXIT_RUNTIME=1'], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?740')
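# Same mouse test, but with a custom page that offsets the canvas via CSS; the simulated
# events carry absolute page coordinates, so the runtime has to map them to canvas-relative
# coordinates itself.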
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1')
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1')
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests also succeed when an attribute is not supported by the implementation.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
def test_fflush(self):
return self.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
self.btest('test_fflush.cpp', '0', args=['-s', 'NO_EXIT_RUNTIME=1', '--shell-file', path_from_root('tests', 'test_fflush.html')])
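# First run preloads moar.txt and (with -DFIRST) stores a secret in persistent browser storage;
# later runs, even one that preloads a different moar.txt, are expected to read the previously
# persisted secret back.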
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM=1'])
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
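# Two-phase IDBFS test: the -DFIRST build writes SECRET and syncs it out to IndexedDB;
# the second build mounts IDBFS again and is expected to find the same secret after the reload.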
def test_fs_idbfs_sync(self):
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''] + extra)
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_fs_memfs_fsync(self):
args = ['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1', force_c=True, args=args + mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main']'''])
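# WORKERFS can only be mounted inside a worker, hence --proxy-to-worker: pre.js mounts a Blob
# and a File object under /work, and the C side reads their contents back.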
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
open(self.in_dir('pre.js'), 'w').write('''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', force_c=True, args=['--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
open('file1.txt', 'w').write('first')
if not os.path.exists('sub'): os.makedirs('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
Popen([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js']).communicate()
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
import random
self.clear()
os.mkdir('subdir')
open('file1.txt', 'wb').write('0123456789' * (1024*128))
open(os.path.join('subdir', 'file2.txt'), 'wb').write('1234567890' * (1024*128))
random_data = [chr(random.randint(0,255)) for x in range(1024*128*10 + 1)]
random_data[17] = 'X'
open('file3.txt', 'wb').write(''.join(random_data))
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print 'emcc-normal'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'], timeout=60)
assert os.stat('file1.txt').st_size + os.stat(os.path.join('subdir', 'file2.txt')).st_size + os.stat('file3.txt').st_size == 3*1024*128*10 + 1
assert os.stat('test.data').st_size < (3*1024*128*10)/2 # over half is gone
print ' emcc-opts'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'], timeout=60)
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print 'normal'
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1'], timeout=60)
print ' opts'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-O2'], timeout=60)
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print 'manual'
subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1'], timeout=60)
print ' opts'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-O2'], timeout=60)
print ' opts+closure'
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-O2', '--closure', '1', '-g1'], timeout=60)
'''# non-lz4 for comparison
try:
os.mkdir('files')
except:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([PYTHON, FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'], timeout=60)'''
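# Drives idbstore.c through a fixed sequence of -DSTAGE values against a cleared directory each
# time; each stage reports its own number when its IndexedDB operation succeeds.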
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=75000000'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17')
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file', path_from_root('tests', 'sdl_canvas_size.html')])
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1'],
message='You should see a blue triangle.')
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'])
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=[])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2'])
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_worker(self):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
# run once without preloaded file data and once with it
for file_data in [0, 1]:
print 'file data', file_data
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) , stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
if not file_data: self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
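# Serves ~10 KB of random data from a chunked local HTTP server and reads it inside a worker
# via a synchronous XHR-backed lazy file (FS.createLazyFile); the worker reports the adler32
# checksum of what it read (/report_result?<checksum>).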
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""")
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename]).communicate()
chunkSize = 1024
data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid a race condition on cleanup: wait a bit so that the processes have released their
# file locks and test tearDown won't attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
def test_glgears_long(self):
for proxy in [0, 1]:
print 'proxy', proxy
self.btest('hello_world_gles.c', expected=map(str, range(30, 500)), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST'] + (['--proxy-to-worker'] if proxy else []), timeout=30)
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print full_es2
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []),
).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print program
basename = os.path.basename(program)
args = []
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')), args=args, timeout=30)
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
#(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
#(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print source
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first')
open('file2.txt', 'w').write('second')
setup()
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1')
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
def test_emscripten_main_loop(self):
self.btest('emscripten_main_loop.cpp', '0')
def test_emscripten_main_loop_settimeout(self):
self.btest('emscripten_main_loop_settimeout.cpp', '1')
def test_emscripten_main_loop_and_blocker(self):
self.btest('emscripten_main_loop_and_blocker.cpp', '0')
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1')
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1')
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1')
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1')
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1')
def test_gl_textures(self):
self.btest('gl_textures.cpp', '0')
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'], reference_slack=1)
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'], reference_slack=1)
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'])
def test_gl_ps_worker(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'], also_proxied=True)
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'RELOCATABLE=1'])
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1']) # some coverage for GL_DEBUG not breaking the build
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
for opts in [0, 1]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=['-O' + str(opts), 'side.c', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1']) # multitexture
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_u4fv_2(self):
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('cubegeom_u4fv_2.c', reference='cubegeom_u4fv_2.png', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'SPLIT_MEMORY=16777216']) # check for uniform4fv slice being valid in split memory
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', reference='htmltest.png')
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'])
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1'])
def test_s3tc_crunch(self):
try:
print 'Crunch is located at ' + CRUNCH
except:
return self.skip('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
def test(args):
print args
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
open('text.txt', 'w').write('123')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'] + args, stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1'])
test([])
test(['text.txt']) # also package a non-crunch file
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
try:
print 'Crunch is located at ' + CRUNCH
except:
return self.skip('Skipped: Crunch is not present on the current system. Please install it (manually or via emsdk) and make sure it is activated in the Emscripten configuration file.')
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1'])
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
Settings.ASM_JS = 1
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print 'passed asm test'
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png')
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png')
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_runtimelink(self):
main, supp = self.setup_runtimelink_test()
open('supp.cpp', 'w').write(supp)
Popen([PYTHON, EMCC, 'supp.cpp', '-o', 'supp.js', '-s', 'SIDE_MODULE=1', '-O2']).communicate()
self.btest(main, args=['-DBROWSER=1', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.js"]'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
Module.print('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
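# The memory initializer file is fetched asynchronously, so the heap write in post.js happens
# before it arrives: with ASSERTIONS the early write is detected (result 9), without them it is
# silently overwritten by the init data (result 3).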
def test_mem_init(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print what, status
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
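# Exercises calling into compiled code at the wrong time: ccall/cwrap/direct calls made before
# the runtime is initialized must abort when assertions are on, while the same calls made from
# myJSCallback() at a valid time succeed (presumably summing 100+200+300 to the expected 600).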
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
Module.print('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
Module.print('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
Module.print('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
Module.print('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:8888/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 1000);
'''
open('pre_main.js', 'w').write(r'''
Module._main = function(){
myJSCallback();
return 0;
};
''')
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_main.js'], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
print '\n', filename, extra_args
print 'mem init, so async, call too early'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1'] + extra_args)
print 'sync startup, call too late'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args)
print 'sync, runtime still alive, so all good'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'NO_EXIT_RUNTIME=1'] + extra_args)
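# worker_api tests: the worker side is compiled separately with BUILD_AS_WORKER=1, and the main
# page drives it through the emscripten worker API (emscripten_create_worker / emscripten_call_worker).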
def test_worker_api(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
def test_module(self):
return self.skip('non-fastcomp is deprecated and fails in 3.5')
Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
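# Sanity-check emrun's info commands: --system_info/--browser_info and --list_browsers should
# print details without raising a Python exception (no 'Traceback' in the output).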
def test_emrun_info(self):
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'])
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--list_browsers'])
assert 'Traceback' not in result
def test_emrun(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html']).communicate()
outdir = os.getcwd()
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the launched browser
# uses that directory as its startup directory and does not close as part of the test; on Windows this pins
# the cwd and makes it impossible to delete. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if emscripten_browser is not None:
# If EMSCRIPTEN_BROWSER carries command line arguments to pass to the browser (e.g. "firefox -profile /path/to/foo"), those can't be passed via emrun, so strip them out.
browser_name = shlex.split(emscripten_browser)[0]
args += ['--browser', browser_name]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
process = subprocess.Popen(args)
process.communicate()
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert process.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler'], timeout=30)
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run the tests in Node and/or SPIDERMONKEY using run_js. Use the closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js'], stdout=PIPE, stderr=PIPE).communicate()
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print out
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1')
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
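# Regression test: adding and immediately removing a run dependency in preRun must not make the runtime start twice.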
def test_doublestart_bug(self):
open('pre.js', 'w').write(r'''
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
Module['addRunDependency']('test_run_dependency');
Module['removeRunDependency']('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
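# HTML5 API tests, run both unoptimized and with -O2/--closure.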
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0', timeout=20)
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts, expected='0', timeout=20)
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-s', 'NO_EXIT_RUNTIME=1'], expected='0', timeout=20)
def test_webgl2(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'USE_WEBGL2=1'] + opts, expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_codemods(self):
for opt_level in [0, 2]:
print 'opt level', opt_level
opts = '-O' + str(opt_level)
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, but browser has support, so semantics are like float
# now use a shell to remove the browser's fround support
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', '''
Math.fround = null;
var Module = {
'''))
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=2', '--separate-asm']) # empty polyfill, no browser support, so semantics are like double
# finally, remove fround and patch it up as the code executes (after polyfilling etc.), to verify that we got rid of it entirely on the client side
fixer = 'python fix.py'
open('fix.py', 'w').write(r'''
import sys
filename = sys.argv[1]
js = open(filename).read()
replaced = js.replace("var Math_fround = Math.fround;", "var Math_fround = Math.fround = function(x) { return 0; }")
assert js != replaced
open(filename, 'w').write(replaced)
''')
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer]) # no fround anyhow
self.btest(path_from_root('tests', 'codemods.cpp'), expected='121378', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=1']) # a proper polyfill was instated, but then replaced by the fix so 0 is returned all the time, hence a different result here
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=2', '--separate-asm']) # we should remove the calls to the polyfill ENTIRELY here, on the clientside, so we should NOT see any calls to fround here, and result should be like double
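# emscripten_wget() needs to block, so it is tested with ASYNCIFY, and with the emterpreter on top of that.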
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print 'asyncify+emterpreter'
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT();
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w')).communicate()
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html']).communicate()
os.mkdir('sub')
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print 'in html'
open('shell.html', 'w').write('''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html'] + args).communicate()
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
open('src.cpp', 'w').write(self.with_report_result(r'''
#include<stdio.h>
#include<emscripten.h>
int main() {
int result = EM_ASM_INT_V({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT();
return 0;
}
'''))
in_html('200', ['-s', 'FORCE_FILESYSTEM=1'])
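# GLFW smoke tests: USE_GLFW=3 for the basic test, and both GLFW 2 and 3 for the event test below.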
def test_glfw3(self):
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3'], expected='1')
def test_glfw_events(self):
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2"], expected='1')
self.btest(path_from_root('tests', 'glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3"], expected='1')
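# Hot-swapping of the asm.js module (SWAPPABLE_ASM_MODULE): run.js loads second.js at runtime and expects the swapped-in _func to return a different value.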
def test_asm_swapping(self):
self.clear()
open('run.js', 'w').write(r'''
Module['_main'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print opts
opts += ['-s', 'NO_EXIT_RUNTIME=1', '--pre-js', 'run.js', '-s', 'SWAPPABLE_ASM_MODULE=1'] # important that both modules are built with the same opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
Popen([PYTHON, EMCC, 'second.cpp'] + opts).communicate()
Popen([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in']).communicate()
assert os.path.exists('second.js')
if isinstance(SPIDERMONKEY_ENGINE, list) and len(SPIDERMONKEY_ENGINE[0]) != 0:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print 'Skipping asm validation check, spidermonkey is not configured'
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=opts, expected='999')
def test_sdl2_image(self):
# Load an image file and get pixel data. Also gives -O2 coverage for --preload-file and the memory init file.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_formats(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED','-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
def test_sdl2_key(self):
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, c);
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2','--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?712', timeout=30)
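# Same as the SDL2 mouse test above, but with the canvas offset within the page, to check that coordinates are translated correctly.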
def test_sdl2_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?572')
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def zzztest_sdl2_gfx_primitives(self):
self.btest('sdl2_gfx_primitives.c', args=['-s', 'USE_SDL=2', '-lSDL2_gfx'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
open('test.html', 'w').write(html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
def test_sdl2_ttf(self):
shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window',
timeout=30)
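# Emterpreter-async tests: code that uses emscripten_sleep()-style blocking APIs, across optimization levels.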
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3'])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)], timeout=20)
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'], timeout=20)
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_emterpreter_async_sleep2_safeheap(self):
# check that safe-heap machinery does not cause errors in async operations
self.btest('emterpreter_async_sleep2_safeheap.cpp', '17', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz', '-profiling', '-s', 'SAFE_HEAP=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_WHITELIST=["_main","_callback","_fix"]'])
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP=1'], timeout=60)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'], timeout=30)
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'], timeout=30)
def test_emterpreter_async_iostream(self):
self.btest('emterpreter_async_iostream.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1'])
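# MODULARIZE=1 turns the output into a factory function; check the default 'Module' export, a custom EXPORT_NAME, and passing in Module options.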
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
for args, code in [
([], 'Module();'), # defaults
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld();
'''), # use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''), # pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''), # similar, but without a mem init file, everything is sync and simple
]:
print 'test on', opts, args, code
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1'] + args + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
output = Popen([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue']).communicate()[0]
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I' + path_from_root('tests', 'webidl'), '-DBROWSER'])
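# Runtime dynamic linking: the main module lists side.js in Module.dynamicLibraries and calls a function defined there.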
def test_dynamic_link(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.js'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = Module.print;
Module.print = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
int result = 2;
REPORT_RESULT();
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.js']).communicate()
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE=1', '-O2', '--pre-js', 'pre.js'])
def test_dynamic_link_glemu(self):
open('pre.js', 'w').write('''
Module.dynamicLibraries = ['side.js'];
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
int result = 1;
REPORT_RESULT();
return 0;
}
''')
open('side.cpp', 'w').write(r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
Popen([PYTHON, EMCC, 'side.cpp', '-s', 'SIDE_MODULE=1', '-O2', '-o', 'side.js']).communicate()
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE=1', '-O2', '-s', 'LEGACY_GL_EMULATION=1', '--pre-js', 'pre.js'])
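# Start with a tiny heap (TOTAL_MEMORY=10000) and preload a ~30MB file, forcing memory growth to happen during startup itself.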
def test_memory_growth_during_startup(self):
open('data.dat', 'w').write('X' * (30*1024*1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS=1', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'TOTAL_MEMORY=10000', '-s', 'TOTAL_STACK=5000', '--preload-file', 'data.dat'])
# pthreads tests
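# Helper that writes a shell HTML file which hides SharedArrayBuffer/Atomics, so tests can exercise the fallback path for browsers without shared memory support.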
def prep_no_SAB(self):
open('html.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
# Test that the emscripten_ atomics api functions work.
def test_pthread_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=120) # extra time on first test, to be sure to build all libraries
# Test 64-bit atomics.
def test_pthread_64bit_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test 64-bit C++11 atomics.
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS=1']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads + ['-std=c++11'], timeout=30)
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
def test_pthread_gcc_atomic_fetch_and_op(self):
# We need to resort to using regexes to optimize out SharedArrayBuffer when pthreads are not supported, which is brittle!
# Therefore perform very extensive testing of different codegen modes to catch any problems.
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-O3', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1'], ['-Os'], ['-Oz']]:
for debug in [[], ['-g1'], ['-g2'], ['-g4']]:
for f32 in [[], ['-s', 'PRECISE_F32=1']]:
print opt, debug, f32
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=opt+debug+f32+['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=60)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# 64 bit version of the above test.
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Tests the remaining GCC atomics not covered by the two tests above.
def test_pthread_gcc_atomics(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test that basic thread creation works.
def test_pthread_create(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [['-s', 'USE_PTHREADS=1'], ['-s', 'USE_PTHREADS=2', '--separate-asm']]:
print str(opt) + ' ' + str(pthreads)
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
if 'USE_PTHREADS=2' in pthreads:
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'), expected='0', args=opt + pthreads + ['-s', 'PTHREAD_POOL_SIZE=8', '--shell-file', 'html.html'], timeout=30)
# Test that a pthread can spawn another pthread of its own.
def test_pthread_create_pthread(self):
for opt in [['-s', 'USE_PTHREADS=2', '--separate-asm'], ['-s', 'USE_PTHREADS=1', '--proxy-to-worker']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=opt + ['-O3', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'NO_EXIT_RUNTIME=1'], timeout=30)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
def test_pthread_nested_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=2'], timeout=30)
# Test that main thread can wait for a pthread to finish via pthread_join().
def test_pthread_join(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test pthread_cancel() operation
def test_pthread_cancel(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test pthread_kill() operation
def test_pthread_kill(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
def test_pthread_cleanup(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Tests the pthread mutex api.
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=20)
# Test that memory allocation is thread-safe.
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Stress test pthreads allocating memory that will call sbrk(), with the main thread having to free up the data.
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'TOTAL_MEMORY=268435456'], timeout=30)
# Test that the pthread_barrier API works ok.
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread_once() function.
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test against a certain thread exit time handling bug by spawning tons of threads.
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# It is common for code to flip volatile global variables for thread control. This is a bit lax, but nevertheless
# test whether that kind of scheme works with Emscripten as well.
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, timeout=30)
# Test thread-specific data (TLS).
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test the pthread condition variable creation and waiting.
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8'], timeout=30)
# Test that pthreads are able to do printf.
def test_pthread_printf(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that pthreads are able to use std::cout. This previously failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the main thread is able to use pthread_set/getspecific.
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '--shell-file', 'html.html'], timeout=30)
# Test the -s PTHREAD_HINT_NUM_CORES=x command line variable.
def test_pthread_num_logical_cores(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2'], timeout=30)
self.prep_no_SAB()
self.btest(path_from_root('tests', 'pthread', 'test_pthread_num_logical_cores.cpp'), expected='0', args=['-O3', '-g', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_HINT_NUM_CORES=2', '--shell-file', 'html.html'], timeout=30)
# Test that pthreads have access to filesystem.
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=1'], timeout=30)
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS=2', '--separate-asm', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args, timeout=30)
def test_pthread_separate_asm_pthreads(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8', '--separate-asm', '--profiling'], timeout=30)
def test_pthread_custom_pthread_main_url(self):
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT();
}
'''))
# Test that it is possible to define "Module.pthreadMainPrefixURL" string to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { pthreadMainPrefixURL: "cdn/", '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test.html']).communicate()
shutil.move('pthread-main.js', os.path.join('cdn', 'pthread-main.js'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where pthread-main.js will be loaded from.
open(self.in_dir('shell2.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "pthread-main.js") return "cdn/pthread-main.js"; else return filename; }, '))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell2.html', '-s', 'IN_TEST_HARNESS=1', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=1', '-o', 'test2.html']).communicate()
try_delete('pthread-main.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that when the main thread is performing a futex wait and a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), the two do not deadlock.
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS=2', '-s', 'PTHREAD_POOL_SIZE=1', '--separate-asm'], timeout=30)
# test atomicrmw i64
def test_atomicrmw_i64(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'atomicrmw_i64.ll'), '-s', 'USE_PTHREADS=1', '-s', 'IN_TEST_HARNESS=1', '-o', 'test.html']).communicate()
self.run_browser('test.html', None, '/report_result?0')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'], timeout=30)
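# Build a source whose static data contains every possible byte pair, to stress the MEM_INIT_METHOD=2 encoding.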
def test_meminit_pairs(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join(''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256))
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1"]
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_meminit_big(self):
d = 'const char *data[] = {\n "'
d += '",\n "'.join([''.join('\\x{:02x}\\x{:02x}'.format(i, j)
for j in range(256)) for i in range(256)]*256)
with open(path_from_root('tests', 'meminit_pairs.c')) as f:
d += '"\n};\n' + f.read()
assert len(d) > (1 << 27) # more than 32M memory initializer
args = ["-O2", "--memory-init-file", "0", "-s", "MEM_INIT_METHOD=2", "-s", "ASSERTIONS=1"]
self.btest(d, expected='0', args=args + ["--closure", "0"])
self.btest(d, expected='0', args=args + ["--closure", "0", "-g"])
self.btest(d, expected='0', args=args + ["--closure", "1"])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
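# --separate-asm support: verify both manual splitting via tools/separate_asm.py and the emcc flag, and that removing the asm file breaks the page as expected.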
def test_separate_asm(self):
for opts in [['-O0'], ['-O1'], ['-O2'], ['-O2', '--closure', '1']]:
print opts
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.html'] + opts).communicate()
self.run_browser('test.html', None, '/report_result?0')
open('one.html', 'w').write('<script src="test.js"></script>')
self.run_browser('one.html', None, '/report_result?0')
Popen([PYTHON, path_from_root('tools', 'separate_asm.py'), 'test.js', 'asm.js', 'rest.js']).communicate()
open('two.html', 'w').write('''
<script>
var Module = {};
</script>
<script src="asm.js"></script>
<script src="rest.js"></script>
''')
self.run_browser('two.html', None, '/report_result?0')
self.clear()
assert not os.path.exists('tests.asm.js')
self.btest('browser_test_hello_world.c', expected='0', args=opts + ['--separate-asm'])
assert os.path.exists('test.asm.js')
os.unlink('test.asm.js')
self.run_browser('test.html', None, '[no http server activity]', timeout=5) # fail without the asm
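# EMTERPRETIFY_FILE emits the emterpreter bytecode into code.dat; the custom shell fetches it via XHR into Module.emterpreterFile before loading test.js.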
def test_emterpretify_file(self):
open('shell.html', 'w').write('''
<!--
{{{ SCRIPT }}} // ignore this, we do it ourselves
-->
<script>
var Module = {};
var xhr = new XMLHttpRequest();
xhr.open('GET', 'code.dat', true);
xhr.responseType = 'arraybuffer';
xhr.onload = function() {
Module.emterpreterFile = xhr.response;
var script = document.createElement('script');
script.src = "test.js";
document.body.appendChild(script);
};
xhr.send(null);
</script>
''')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '--shell-file', 'shell.html', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
try_delete('code.dat')
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_FILE="code.dat"', '-O2', '-g', '-s', 'ASSERTIONS=1'])
assert os.path.exists('code.dat')
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print opts
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'browser_test_hello_world.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker']).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
for o in [0, 1, 2]:
print o
opts = ['-O' + str(o)]
print 'plain html'
open('src.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'in_flight_memfile_request.c')).read()))
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'test.js'] + opts).communicate()
open('test.html', 'w').write('<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print 'default html'
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
def test_split_memory_large_file(self):
size = 2*1024*1024
open('huge.dat', 'w').write(''.join([chr((x*x)&255) for x in range(size*2)])) # larger than a memory chunk
self.btest('split_memory_large_file.cpp', expected='1', args=['-s', 'SPLIT_MEMORY=' + str(size), '-s', 'TOTAL_MEMORY=100000000', '-s', 'TOTAL_STACK=10240', '--preload-file', 'huge.dat'], timeout=60)
def test_binaryen(self):
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"'])
self.btest('browser_test_hello_world.c', expected='0', args=['-s', 'BINARYEN=1', '-s', 'BINARYEN_METHOD="interpret-binary"', '-O2'])
tests.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger
from pyspark.storagelevel import StorageLevel
from pyspark.sql import SQLContext
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except ImportError:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except ImportError:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
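# Tests for the shuffle-time combiners: InMemoryMerger keeps everything in memory, ExternalMerger spills to disk when its memory limit is exceeded.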
class TestMerger(unittest.TestCase):
def setUp(self):
self.N = 1 << 16
self.l = list(xrange(self.N))
self.data = zip(self.l, self.l)
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
self.N * 10)
m._cleanup()
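# namedtuple instances must survive a pickle round trip (PySpark's serializers are expected to make dynamically created namedtuple classes picklable).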
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from cPickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEquals(p1, p2)
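# Common base class: creates a local[4] SparkContext per test and restores sys.path afterwards.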
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name, batchSize=2)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
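# Checkpointing tests: RDDs are materialized into a checkpoint directory and can be recovered from it.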
class TestCheckpoint(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class TestAddFile(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
log4j = self.sc._jvm.org.apache.log4j
old_level = log4j.LogManager.getRootLogger().getLevel()
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
log4j.LogManager.getRootLogger().setLevel(old_level)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
class TestRDDFunctions(PySparkTestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.sc.stop()
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
self.sc = SparkContext("local")
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize(["Hello", "World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
def testAggregateByKey(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEquals([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 100000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 270MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEquals(N, m)
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEquals(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals([4], rdd.histogram([0, 10])[1])
self.assertEquals([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEquals([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEquals([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEquals([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEquals(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEquals(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEquals((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEquals([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
# mixed RDD
rdd = self.sc.parallelize([1, 4, "ab", "ac", "b"], 2)
self.assertEquals([1, 1], rdd.histogram([0, 4, 10])[1])
self.assertEquals([2, 1], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals(([1, "b"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
class TestSQL(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.sqlCtx = SQLContext(self.sc)
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
srdd = self.sqlCtx.jsonRDD(rdd)
srdd.count()
srdd.collect()
srdd.schemaString()
srdd.schema()
# cache and checkpoint
self.assertFalse(srdd.is_cached)
srdd.persist(StorageLevel.MEMORY_ONLY_SER)
srdd.unpersist()
srdd.cache()
self.assertTrue(srdd.is_cached)
self.assertFalse(srdd.isCheckpointed())
self.assertEqual(None, srdd.getCheckpointFile())
srdd = srdd.coalesce(2, True)
srdd = srdd.repartition(3)
srdd = srdd.distinct()
srdd.intersection(srdd)
self.assertEqual(2, srdd.count())
srdd.registerTempTable("temp")
srdd = self.sqlCtx.sql("select foo from temp")
srdd.count()
srdd.collect()
class TestIO(PySparkTestCase):
def test_stdout_redirection(self):
import subprocess
def func(x):
subprocess.check_call('ls', shell=True)
self.sc.parallelize([1]).foreach(func)
class TestInputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
self.sc._jvm.WriteInputFormatTestDataGenerator.generateData(self.tempdir.name, self.sc._jsc)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name)
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.assertEqual(maps, em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
ec = (u'1',
{u'__class__': u'org.apache.spark.api.python.TestWritable',
u'double': 54.0, u'int': 123, u'str': u'test1'})
self.assertEqual(clazz[0], ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
batchSize=1).collect())
self.assertEqual(unbatched_clazz[0], ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
class TestOutputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = sorted(self.sc.sequenceFile(basepath + "/sfmap/").collect())
self.assertEqual(maps, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = sorted(self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
self.assertEqual(result, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
old_dataset = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect())
self.assertEqual(old_dataset, dict_data)
@unittest.skipIf(sys.version_info[:2] <= (2, 6), "Skipped on 2.6 until SPARK-2951 is fixed")
def test_newhadoop(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = zip(x, y)
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_unbatched_save_and_read(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei, numSlices=len(ei)).saveAsSequenceFile(
basepath + "/unbatched/")
unbatched_sequence = sorted(self.sc.sequenceFile(
basepath + "/unbatched/",
batchSize=1).collect())
self.assertEqual(unbatched_sequence, ei)
unbatched_hadoopFile = sorted(self.sc.hadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_hadoopFile, ei)
unbatched_newAPIHadoopFile = sorted(self.sc.newAPIHadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopFile, ei)
oldconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_hadoopRDD = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=oldconf,
batchSize=1).collect())
self.assertEqual(unbatched_hadoopRDD, ei)
newconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_newAPIHadoopRDD = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=newconf,
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopRDD, ei)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, numSlices=len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class TestDaemon(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shut down the worker
sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class TestWorker(PySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
self.sc.parallelize(range(1)).foreach(sleep)
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
data = open(path).read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
def test_fd_leak(self):
N = 1100 # fd limit is 1024 by default
rdd = self.sc.parallelize(range(N), N)
self.assertEquals(N, rdd.count())
class TestSparkSubmit(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = map(gammaln, x)
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print "NOTE: Skipping SciPy tests as it does not seem to be installed"
if not _have_numpy:
print "NOTE: Skipping NumPy tests as it does not seem to be installed"
unittest.main()
if not _have_scipy:
print "NOTE: SciPy tests were skipped as it does not seem to be installed"
if not _have_numpy:
print "NOTE: NumPy tests were skipped as it does not seem to be installed"
sim.py
import asyncio
import time as ttime
from collections import deque, OrderedDict
import itertools
import numpy as np
import random
import threading
from tempfile import mkdtemp
import os
import warnings
import weakref
import uuid
import copy
import logging
from .signal import Signal, EpicsSignal, EpicsSignalRO
from .status import DeviceStatus, StatusBase
from .device import (Device, Component, Component as C,
DynamicDeviceComponent as DDC, Kind)
from types import SimpleNamespace
from .pseudopos import (PseudoPositioner, PseudoSingle,
real_position_argument, pseudo_position_argument)
from .positioner import SoftPositioner
from .utils import DO_NOT_USE, ReadOnlyError, LimitError
logger = logging.getLogger(__name__)
# two convenience functions 'vendored' from bluesky.utils
def new_uid():
return str(uuid.uuid4())
def short_uid(label=None, truncate=6):
"Return a readable but unique id like 'label-fjfi5a'"
if label:
return '-'.join([label, new_uid()[:truncate]])
else:
return new_uid()[:truncate]
class NullStatus(StatusBase):
"A simple Status object that is always immediately done, successfully."
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._finished(success=True)
class SynSignal(Signal):
"""
A synthetic Signal that evaluates a Python function when triggered.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
loop : asyncio.EventLoop, optional
used for ``subscribe`` updates; uses ``asyncio.get_event_loop()`` if
unspecified
"""
# This signature is arranged to mimic the signature of EpicsSignal, where
# the Python function (func) takes the place of the PV.
def __init__(self, func=None, *,
name, # required, keyword-only
exposure_time=0,
precision=3,
parent=None,
labels=None,
kind=None,
loop=None):
if func is None:
# When triggered, just put the current value.
func = self.get
# Initialize readback with a None value
self._readback = None
if loop is None:
loop = asyncio.get_event_loop()
self._func = func
self.exposure_time = exposure_time
self.precision = precision
self.loop = loop
super().__init__(value=self._func(), timestamp=ttime.time(), name=name,
parent=parent, labels=labels, kind=kind)
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.precision
return res
def trigger(self):
delay_time = self.exposure_time
if delay_time:
st = DeviceStatus(device=self)
if self.loop.is_running():
def update_and_finish():
self.put(self._func())
st._finished()
self.loop.call_later(delay_time, update_and_finish)
else:
def sleep_and_finish():
ttime.sleep(delay_time)
self.put(self._func())
st._finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
return st
else:
self.put(self._func())
return NullStatus()
def get(self):
# Get a new value, which allows us to synthesize noisy data, for
# example.
return super().get()
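# Minimal usage sketch (illustrative only; the helper name `_synsignal_example`
# is made up): with exposure_time=0 a trigger completes immediately and the
# reading carries the latest value returned by `func`.
def _synsignal_example():
    noisy_det = SynSignal(func=random.random, name='noisy_det')
    status = noisy_det.trigger()   # NullStatus: already done
    assert status.done
    return noisy_det.read()        # {'noisy_det': {'value': ..., 'timestamp': ...}}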
class SignalRO(Signal):
def put(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
def set(self, value, *, timestamp=None, force=False):
raise ReadOnlyError("The signal {} is readonly.".format(self.name))
class SynSignalRO(SignalRO, SynSignal):
pass
def periodic_update(ref, period, period_jitter):
while True:
signal = ref()
if not signal:
# Our target Signal has been garbage collected. Shut down the
# Thread.
return
signal.put(signal._func())
del signal
# Sleep for period +/- period_jitter.
ttime.sleep(max(period + period_jitter * np.random.randn(), 0))
class SynPeriodicSignal(SynSignal):
"""
A synthetic Signal that evaluates a Python function periodically.
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal generates white noise on [0, 1].
name : string, keyword only
period : number, optional
How often the Signal's value is updated in the background. Default is
1 second.
period_jitter : number, optional
Random Gaussian variation of the period. Default is 1 second.
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
loop : asyncio.EventLoop, optional
used for ``subscribe`` updates; uses ``asyncio.get_event_loop()`` if
unspecified
"""
def __init__(self, func=None, *,
name, # required, keyword-only
period=1, period_jitter=1,
exposure_time=0,
parent=None,
labels=None,
kind=None,
loop=None):
if func is None:
func = np.random.rand
super().__init__(name=name, func=func,
exposure_time=exposure_time,
parent=parent, labels=labels, kind=kind, loop=loop)
self.__thread = threading.Thread(target=periodic_update, daemon=True,
args=(weakref.ref(self),
period,
period_jitter))
self.__thread.start()
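# Minimal usage sketch (illustrative only): SynPeriodicSignal updates itself
# from a daemon thread roughly every `period` seconds, so a subscription
# callback fires without any explicit trigger. The sleep below is only to let
# a couple of background updates land.
def _periodic_signal_example():
    sig = SynPeriodicSignal(name='rand', period=0.5, period_jitter=0.1)
    seen = []
    sig.subscribe(lambda *args, **kwargs: seen.append(kwargs.get('value')))
    ttime.sleep(2)
    return seen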
class ReadbackSignal(SignalRO):
def get(self):
return self.parent.sim_state['readback']
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the readback value'''
return self.parent.sim_state['readback_ts']
class SetpointSignal(Signal):
def put(self, value, *, timestamp=None, force=False):
self.parent.set(value)
def get(self):
return self.parent.sim_state['setpoint']
def describe(self):
res = super().describe()
# There should be only one key here, but for the sake of generality....
for k in res:
res[k]['precision'] = self.parent.precision
return res
@property
def timestamp(self):
'''Timestamp of the setpoint value'''
return self.parent.sim_state['setpoint_ts']
class SynAxisNoHints(Device):
"""
A synthetic settable Device that mimics any 1D axis (position, temperature).
Parameters
----------
name : string, keyword only
readback_func : callable, optional
When the Device is set to ``x``, its readback will be updated to
``f(x)``. This can be used to introduce random noise or a systematic
offset.
Expected signature: ``f(x) -> value``.
value : object, optional
The initial value. Default is 0.
delay : number, optional
Simulates how long it takes the device to "move". Default is 0 seconds.
precision : integer, optional
Digits of precision. Default is 3.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
kind : a member of the Kind IntEnum (or equivalent integer), optional
Default is Kind.normal. See Kind for options.
loop : asyncio.EventLoop, optional
used for ``subscribe`` updates; uses ``asyncio.get_event_loop()`` if
unspecified
"""
readback = Component(ReadbackSignal, value=None)
setpoint = Component(SetpointSignal, value=None)
SUB_READBACK = 'readback'
_default_sub = SUB_READBACK
def __init__(self, *,
name,
readback_func=None, value=0, delay=0,
precision=3,
parent=None,
labels=None,
kind=None,
loop=None):
if readback_func is None:
def readback_func(x):
return x
if loop is None:
loop = asyncio.get_event_loop()
self.sim_state = {}
self._readback_func = readback_func
self.delay = delay
self.precision = precision
self.loop = loop
# initialize values
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.sim_state['readback'] = readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
super().__init__(name=name, parent=parent, labels=labels, kind=kind)
self.readback.name = self.name
def set(self, value):
old_setpoint = self.sim_state['setpoint']
self.sim_state['setpoint'] = value
self.sim_state['setpoint_ts'] = ttime.time()
self.setpoint._run_subs(sub_type=self.setpoint.SUB_VALUE,
old_value=old_setpoint,
value=self.sim_state['setpoint'],
timestamp=self.sim_state['setpoint_ts'])
def update_state():
old_readback = self.sim_state['readback']
self.sim_state['readback'] = self._readback_func(value)
self.sim_state['readback_ts'] = ttime.time()
self.readback._run_subs(sub_type=self.readback.SUB_VALUE,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
self._run_subs(sub_type=self.SUB_READBACK,
old_value=old_readback,
value=self.sim_state['readback'],
timestamp=self.sim_state['readback_ts'])
if self.delay:
st = DeviceStatus(device=self)
if self.loop.is_running():
def update_and_finish():
update_state()
st._finished()
self.loop.call_later(self.delay, update_and_finish)
else:
def sleep_and_finish():
ttime.sleep(self.delay)
update_state()
st._finished()
threading.Thread(target=sleep_and_finish, daemon=True).start()
return st
else:
update_state()
return NullStatus()
@property
def position(self):
return self.readback.get()
class SynAxis(SynAxisNoHints):
readback = Component(ReadbackSignal, value=None, kind=Kind.hinted)
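# Minimal usage sketch (illustrative only): with delay=0 a set() completes
# synchronously and the readback follows readback_func.
def _synaxis_example():
    motor = SynAxis(name='motor', readback_func=lambda x: 2 * x)
    status = motor.set(1.5)
    assert status.done             # NullStatus, since delay == 0
    return motor.position          # 3.0, read back through the readback component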
class SynGauss(SynSignal):
"""
Evaluate a point on a Gaussian based on the value of a motor.
Parameters
----------
name : string
motor : Device
motor_field : string
center : number
center of peak
Imax : number
max intensity of peak
sigma : number, optional
Default is 1.
noise : {'poisson', 'uniform', None}, optional
Add noise to the gaussian peak.
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
random_state : numpy random state object, optional
e.g. np.random.RandomState(0), to generate random numbers with a given seed
Example
-------
motor = SynAxis(name='motor')
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
"""
def __init__(self, name, motor, motor_field, center, Imax, sigma=1,
noise=None, noise_multiplier=1, random_state=None, **kwargs):
if noise not in ('poisson', 'uniform', None):
raise ValueError("noise must be one of 'poisson', 'uniform', None")
self._motor = motor
if random_state is None:
random_state = np.random
def func():
m = motor.read()[motor_field]['value']
v = Imax * np.exp(-(m - center) ** 2 / (2 * sigma ** 2))
if noise == 'poisson':
v = int(random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += random_state.uniform(-1, 1) * noise_multiplier
return v
super().__init__(func=func, name=name, **kwargs)
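# Minimal worked example (illustrative only): with the motor at the peak
# center the detector reads ~Imax; one sigma away it reads ~Imax * exp(-0.5).
def _syngauss_example():
    motor = SynAxis(name='motor')
    det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1)
    motor.set(0)
    det.trigger()
    at_center = det.read()['det']['value']   # ~1.0
    motor.set(1)
    det.trigger()
    at_sigma = det.read()['det']['value']    # ~0.61
    return at_center, at_sigma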
class Syn2DGauss(SynSignal):
"""
Evaluate a point on a 2-D Gaussian based on the values of two motors.
Parameters
----------
name : str
The name of the detector
motor0 : SynAxis
The 'x' coordinate of the 2-D gaussian blob
motor_field0 : str
The name field of the motor. Should be the key in motor0.describe()
motor1 : SynAxis
The 'y' coordinate of the 2-D gaussian blob
motor_field1 : str
The name field of the motor. Should be the key in motor1.describe()
center : iterable, optional
The center of the gaussian blob
Defaults to (0,0)
Imax : float, optional
The intensity at `center`
Defaults to 1
sigma : float, optional
Standard deviation for gaussian blob
Defaults to 1
noise : {'poisson', 'uniform', None}, optional
Add noise to the gaussian peak.
Defaults to None
noise_multiplier : float, optional
Only relevant for 'uniform' noise. Multiply the random amount of
noise by 'noise_multiplier'
Defaults to 1
random_state : numpy random state object, optional
e.g. np.random.RandomState(0), to generate random numbers with a given seed
Example
-------
motor1 = SynAxis(name='motor1')
motor2 = SynAxis(name='motor2')
det = Syn2DGauss('det', motor1, 'motor1', motor2, 'motor2', center=(0, 0), Imax=1, sigma=1)
"""
def __init__(self, name, motor0, motor_field0, motor1, motor_field1,
center, Imax, sigma=1, noise=None, noise_multiplier=1,
random_state=None, **kwargs):
if noise not in ('poisson', 'uniform', None):
raise ValueError("noise must be one of 'poisson', 'uniform', None")
self._motor = motor0
self._motor1 = motor1
if random_state is None:
random_state = np.random
def func():
x = motor0.read()[motor_field0]['value']
y = motor1.read()[motor_field1]['value']
m = np.array([x, y])
v = Imax * np.exp(-np.sum((m - center) ** 2) / (2 * sigma ** 2))
if noise == 'poisson':
v = int(random_state.poisson(np.round(v), 1))
elif noise == 'uniform':
v += random_state.uniform(-1, 1) * noise_multiplier
return v
super().__init__(name=name, func=func, **kwargs)
class TrivialFlyer:
"""Trivial flyer that complies to the API but returns empty data."""
name = 'trivial_flyer'
parent = None
def kickoff(self):
return NullStatus()
def describe_collect(self):
return {'stream_name': {}}
def read_configuration(self):
return OrderedDict()
def describe_configuration(self):
return OrderedDict()
def complete(self):
return NullStatus()
def collect(self):
for i in range(100):
yield {'data': {}, 'timestamps': {}, 'time': i, 'seq_num': i}
def stop(self, *, success=False):
pass
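# Minimal sketch of the flyer interface using TrivialFlyer (illustrative
# only): kickoff/complete are immediately done and collect() yields 100
# empty events.
def _trivial_flyer_example():
    flyer = TrivialFlyer()
    assert flyer.kickoff().done
    assert flyer.complete().done
    return list(flyer.collect())   # 100 dicts with empty 'data'/'timestamps'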
class MockFlyer:
"""
Class for mocking a flyscan API implemented with stepper motors.
"""
def __init__(self, name, detector, motor, start, stop, num, loop=None):
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = np.linspace(start, stop, num)
self._data = deque()
self._completion_status = None
if loop is None:
loop = asyncio.get_event_loop()
self.loop = loop
def __setstate__(self, val):
name, detector, motor, steps = val
self.name = name
self.parent = None
self._mot = motor
self._detector = detector
self._steps = steps
self._completion_status = None
self.loop = asyncio.get_event_loop()
def __getstate__(self):
return (self.name, self._detector, self._mot, self._steps)
def read_configuration(self):
return OrderedDict()
def describe_configuration(self):
return OrderedDict()
def describe_collect(self):
dd = dict()
dd.update(self._mot.describe())
dd.update(self._detector.describe())
return {'stream_name': dd}
def complete(self):
if self._completion_status is None:
raise RuntimeError("No collection in progress")
return self._completion_status
def kickoff(self):
if self._completion_status is not None:
raise RuntimeError("Already kicked off.")
self._data = deque()
self._future = self.loop.run_in_executor(None, self._scan)
st = DeviceStatus(device=self)
self._completion_status = st
self._future.add_done_callback(lambda x: st._finished())
return st
def collect(self):
if self._completion_status is None or not self._completion_status.done:
raise RuntimeError("No reading until done!")
self._completion_status = None
yield from self._data
def _scan(self):
"This will be run on a separate thread, started in self.kickoff()"
ttime.sleep(.1)
for p in self._steps:
stat = self._mot.set(p)
while True:
if stat.done:
break
ttime.sleep(0.01)
stat = self._detector.trigger()
while True:
if stat.done:
break
ttime.sleep(0.01)
event = dict()
event['time'] = ttime.time()
event['data'] = dict()
event['timestamps'] = dict()
for r in [self._mot, self._detector]:
d = r.read()
for k, v in d.items():
event['data'][k] = v['value']
event['timestamps'][k] = v['timestamp']
self._data.append(event)
def stop(self, *, success=False):
pass
class SynSignalWithRegistry(SynSignal):
"""
A SynSignal integrated with databroker.assets
Parameters
----------
func : callable, optional
This function sets the signal to a new value when it is triggered.
Expected signature: ``f() -> value``.
By default, triggering the signal does not change the value.
name : string, keyword only
exposure_time : number, optional
Seconds of delay when triggered (simulated 'exposure time'). Default is
0.
parent : Device, optional
Used internally if this Signal is made part of a larger Device.
loop : asyncio.EventLoop, optional
used for ``subscribe`` updates; uses ``asyncio.get_event_loop()`` if
unspecified
reg : Registry, optional
DEPRECATED. If used, this is ignored and a warning is issued. In a
future release, this parameter will be removed.
save_path : str, optional
Path to save files to, if None make a temp dir, defaults to None.
save_func : function, optional
The function to save the data, function signature must be:
`func(file_path, array)`, defaults to np.save
save_spec : str, optional
The spec for the save function, defaults to 'RWFS_NPY'
save_ext : str, optional
The extension to add to the file name, defaults to '.npy'
"""
def __init__(self, *args, reg=DO_NOT_USE, save_path=None,
save_func=np.save, save_spec='NPY_SEQ', save_ext='npy',
**kwargs):
super().__init__(*args, **kwargs)
self.save_func = save_func
self.save_ext = save_ext
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache = deque()
if save_path is None:
self.save_path = mkdtemp()
else:
self.save_path = save_path
self._spec = save_spec # spec name stored in resource doc
self._file_stem = None
self._path_stem = None
self._result = {}
if reg is not DO_NOT_USE:
warnings.warn("The parameter 'reg' is deprecated. It will be "
"ignored. In a future release the parameter will be "
"removed and passing a value for 'reg' will raise "
"an error.")
self.reg = reg
else:
self.reg = None
def stage(self):
self._file_stem = short_uid()
self._path_stem = os.path.join(self.save_path, self._file_stem)
self._datum_counter = itertools.count()
# This is temporarily more complicated than it will be in the future.
# It needs to support old configurations that have a registry.
resource = {'spec': self._spec,
'root': self.save_path,
'resource_path': self._file_stem,
'resource_kwargs': {},
'path_semantics': os.name}
# If a Registry is set, we need to allow it to generate the uid for us.
if self.reg is not None:
# register_resource has accidentally different parameter names...
self._resource_uid = self.reg.register_resource(
rpath=resource['resource_path'],
rkwargs=resource['resource_kwargs'],
root=resource['root'],
spec=resource['spec'],
path_semantics=resource['path_semantics'])
# If a Registry is not set, we need to generate the uid.
else:
self._resource_uid = new_uid()
resource['uid'] = self._resource_uid
self._asset_docs_cache.append(('resource', resource))
def trigger(self):
super().trigger()
# Save the new reading(s) to disk and stash datum references in self._result.
self._result.clear()
for idx, (name, reading) in enumerate(super().read().items()):
# Save the actual reading['value'] to disk. For a real detector,
# this part would be done by the detector IOC, not by ophyd.
self.save_func('{}_{}.{}'.format(self._path_stem, idx,
self.save_ext), reading['value'])
# This is temporarily more complicated than it will be in the
# future. It needs to support old configurations that have a
# registry.
datum = {'resource': self._resource_uid,
'datum_kwargs': dict(index=idx)}
if self.reg is not None:
# If a Registry is set, we need to allow it to generate the
# datum_id for us.
datum_id = self.reg.register_datum(
datum_kwargs=datum['datum_kwargs'],
resource_uid=datum['resource'])
else:
# If a Registry is not set, we need to generate the datum_id.
datum_id = '{}/{}'.format(self._resource_uid,
next(self._datum_counter))
datum['datum_id'] = datum_id
self._asset_docs_cache.append(('datum', datum))
# And now change the reading in place, replacing the value with
# a reference to Registry.
reading['value'] = datum_id
self._result[name] = reading
return NullStatus()
def read(self):
return self._result
def describe(self):
res = super().describe()
for key in res:
res[key]['external'] = "FILESTORE"
return res
def collect_asset_docs(self):
items = list(self._asset_docs_cache)
self._asset_docs_cache.clear()
for item in items:
yield item
def unstage(self):
self._resource_uid = None
self._datum_counter = None
self._asset_docs_cache.clear()
self._file_stem = None
self._path_stem = None
self._result.clear()
class NumpySeqHandler:
specs = {'NPY_SEQ'}
def __init__(self, filename, root=''):
self._name = os.path.join(root, filename)
def __call__(self, index):
return np.load('{}_{}.npy'.format(self._name, index))
def get_file_list(self, datum_kwarg_gen):
"This method is optional. It is not needed for access, but for export."
return ['{name}_{index}.npy'.format(name=self._name, **kwargs)
for kwargs in datum_kwarg_gen]
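# Minimal round-trip sketch (illustrative only; it uses the private _file_stem
# attribute purely for illustration, and `tmpdir` is a caller-supplied
# directory): stage() registers a resource, trigger() writes the value to disk
# and stashes a datum reference in read(), and NumpySeqHandler loads the saved
# array back by index.
def _registry_signal_example(tmpdir):
    img = SynSignalWithRegistry(func=lambda: np.ones((2, 2)),
                                name='img', save_path=tmpdir)
    img.stage()
    img.trigger()
    reading = img.read()['img']            # 'value' is now a datum_id string
    handler = NumpySeqHandler(img._file_stem, root=img.save_path)
    return reading, handler(index=0)       # the saved 2x2 array of ones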
class ABDetector(Device):
a = Component(SynSignal, func=random.random, kind=Kind.hinted)
b = Component(SynSignal, func=random.random)
def trigger(self):
return self.a.trigger() & self.b.trigger()
class DetWithCountTime(Device):
intensity = Component(SynSignal, func=lambda: 0, kind=Kind.hinted)
count_time = Component(Signal)
class DetWithConf(Device):
a = Component(SynSignal, func=lambda: 1, kind=Kind.hinted)
b = Component(SynSignal, func=lambda: 2, kind=Kind.hinted)
c = Component(SynSignal, func=lambda: 3)
d = Component(SynSignal, func=lambda: 4)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.read_attrs = ['a', 'b']
self.configuration_attrs = ['c', 'd']
def trigger(self):
return self.a.trigger() & self.b.trigger()
class InvariantSignal(SynSignal):
# Always returns the same reading, including timestamp.
def read(self):
res = super().read()
for k in res:
res[k]['timestamp'] = 0
return res
def __repr__(self):
return "<INVARIANT REPR>"
class SPseudo3x3(PseudoPositioner):
pseudo1 = C(PseudoSingle, limits=(-10, 10), egu='a', kind=Kind.hinted)
pseudo2 = C(PseudoSingle, limits=(-10, 10), egu='b', kind=Kind.hinted)
pseudo3 = C(PseudoSingle, limits=None, egu='c', kind=Kind.hinted)
real1 = C(SoftPositioner, init_pos=0)
real2 = C(SoftPositioner, init_pos=0)
real3 = C(SoftPositioner, init_pos=0)
sig = C(Signal, value=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo2,
real3=-pseudo_pos.pseudo3)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1,
pseudo2=-real_pos.real2,
pseudo3=-real_pos.real3)
class SPseudo1x3(PseudoPositioner):
pseudo1 = C(PseudoSingle, limits=(-10, 10), kind=Kind.hinted)
real1 = C(SoftPositioner, init_pos=0)
real2 = C(SoftPositioner, init_pos=0)
real3 = C(SoftPositioner, init_pos=0)
@pseudo_position_argument
def forward(self, pseudo_pos):
pseudo_pos = self.PseudoPosition(*pseudo_pos)
# logger.debug('forward %s', pseudo_pos)
return self.RealPosition(real1=-pseudo_pos.pseudo1,
real2=-pseudo_pos.pseudo1,
real3=-pseudo_pos.pseudo1)
@real_position_argument
def inverse(self, real_pos):
real_pos = self.RealPosition(*real_pos)
# logger.debug('inverse %s', real_pos)
return self.PseudoPosition(pseudo1=-real_pos.real1)
class SynAxisNoPosition(SynAxis):
@property
def position(self):
raise AttributeError
def make_fake_device(cls):
"""
Inspect cls and construct a fake device that has the same structure.
This works by replacing EpicsSignal with FakeEpicsSignal and EpicsSignalRO
with FakeEpicsSignalRO. The fake class will be a subclass of the real
class.
This assumes that EPICS connections are done entirely in EpicsSignal and
EpicsSignalRO subcomponents. If this is not true, this will fail silently
on class construction and loudly when manipulating an object.
Parameters
----------
cls : Device
A real Device class to inspect and create a fake Device class from
Returns
-------
fake_device : Device
The resulting fake Device class
"""
# Cache to avoid repeating work.
# EpicsSignal and EpicsSignalRO begin in the cache.
if cls not in fake_device_cache:
if not issubclass(cls, Device):
# Ignore non-devices and non-epics-signals
logger.debug('Ignore cls=%s, bases are %s', cls, cls.__bases__)
fake_device_cache[cls] = cls
return cls
fake_dict = {}
# Update all the components recursively
for cpt_name in cls.component_names:
cpt = getattr(cls, cpt_name)
fake_cpt = copy.copy(cpt)
if isinstance(cpt, Component):
fake_cpt.cls = make_fake_device(cpt.cls)
logger.debug('switch cpt_name=%s to cls=%s',
cpt_name, fake_cpt.cls)
# DDCpt stores the classes in a different place
elif isinstance(cpt, DDC):
fake_defn = {}
for ddcpt_name, ddcpt_tuple in cpt.defn.items():
subcls = make_fake_device(ddcpt_tuple[0])
fake_defn[ddcpt_name] = [subcls] + list(ddcpt_tuple[1:])
fake_cpt.defn = fake_defn
else:
raise RuntimeError(("{} is not a component or a dynamic "
"device component. I don't know how you "
"found this error, should be impossible "
"to reach it.".format(cpt)))
fake_dict[cpt_name] = fake_cpt
fake_class = type('Fake{}'.format(cls.__name__), (cls,), fake_dict)
fake_device_cache[cls] = fake_class
logger.debug('fake_device_cache[%s] = %s', cls, fake_class)
return fake_device_cache[cls]
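# A minimal sketch (hypothetical device and PV prefix; defined but never
# called) of how make_fake_device might be used: a Device built from
# EpicsSignal components is mirrored into a fake class whose signals never
# open a channel-access connection.
def _make_fake_device_demo():
    class RealMotor(Device):
        readback = Component(EpicsSignalRO, 'RBV')
        setpoint = Component(EpicsSignal, 'VAL')
    FakeMotor = make_fake_device(RealMotor)
    motor = FakeMotor('SIM:MTR', name='motor')  # no PVs are connected
    motor.setpoint.sim_put(1.5)  # seed a value on the fake signal
    return motor.setpoint.get()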
class FakeEpicsSignal(SynSignal):
"""
Fake version of EpicsSignal that's really just a SynSignal.
    Whereas SynSignal is generally used to test plans, FakeEpicsSignal is
generally used in conjunction with make_fake_device to test any logic
inside of a Device subclass.
Unlike in SynSignal, this class is generally instantiated inside of a
subcomponent generated automatically by make_fake_device. This means we
need extra hooks for modifying the signal's properties after the class
instantiates.
We can emulate EpicsSignal features here. Currently we just emulate the put
limits because it was involved in a kwarg.
"""
def __init__(self, read_pv, write_pv=None, *, pv_kw=None,
put_complete=False, string=False, limits=False,
auto_monitor=False, name=None, **kwargs):
"""
Mimic EpicsSignal signature
"""
super().__init__(name=name, **kwargs)
self._use_limits = limits
self._put_func = None
def sim_set_func(self, func):
"""
Update the SynSignal function to set a new value on trigger.
"""
self._func = func
def sim_set_putter(self, putter):
"""
        Define arbitrary behavior on signal put.
This can be used to emulate basic IOC behavior.
"""
self._put_func = putter
def put(self, *args, **kwargs):
if self._put_func is not None:
return self._put_func(*args, **kwargs)
return super().put(*args, **kwargs)
def sim_put(self, *args, **kwargs):
"""
Update the read-only signal's value.
Implement here instead of FakeEpicsSignalRO so you can call it with
every fake signal.
"""
return Signal.put(self, *args, **kwargs)
@property
def limits(self):
return self._limits
def sim_set_limits(self, limits):
"""
Set the fake signal's limits.
"""
self._limits = limits
def check_value(self, value):
"""
Check fake limits before putting
"""
super().check_value(value)
if self._use_limits and not self.limits[0] < value < self.limits[1]:
raise LimitError('value={} limits={}'.format(value, self.limits))
class FakeEpicsSignalRO(SynSignalRO, FakeEpicsSignal):
"""
Read-only FakeEpicsSignal
"""
pass
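# A minimal sketch (hypothetical PV name; defined but never called) of the
# sim_* hooks: limits are emulated locally, sim_put plays the role of a value
# arriving from the control system, and a putter can stand in for IOC-side
# behaviour.
def _fake_epics_signal_demo():
    sig = FakeEpicsSignal('SIM:PV', name='sig', limits=True)
    sig.sim_set_limits((0, 10))
    sig.sim_put(3)  # as if the IOC pushed an initial value
    # Hypothetical "IOC" that doubles whatever is written to the signal.
    sig.sim_set_putter(lambda value, **kwargs: sig.sim_put(value * 2))
    sig.put(4)  # routed through the putter, so the readback becomes 8
    return sig.get()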
fake_device_cache = {EpicsSignal: FakeEpicsSignal,
EpicsSignalRO: FakeEpicsSignalRO}
def hw():
"Build a set of synthetic hardware (hence the abbreviated name, hw)"
motor = SynAxis(name='motor', labels={'motors'})
motor1 = SynAxis(name='motor1', labels={'motors'})
motor2 = SynAxis(name='motor2', labels={'motors'})
motor3 = SynAxis(name='motor3', labels={'motors'})
jittery_motor1 = SynAxis(name='jittery_motor1',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
jittery_motor2 = SynAxis(name='jittery_motor2',
readback_func=lambda x: x + np.random.rand(),
labels={'motors'})
noisy_det = SynGauss('noisy_det', motor, 'motor', center=0, Imax=1,
noise='uniform', sigma=1, noise_multiplier=0.1,
labels={'detectors'})
det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
identical_det = SynGauss('det', motor, 'motor', center=0, Imax=1, sigma=1,
labels={'detectors'})
det1 = SynGauss('det1', motor1, 'motor1', center=0, Imax=5, sigma=0.5,
labels={'detectors'})
det2 = SynGauss('det2', motor2, 'motor2', center=1, Imax=2, sigma=2,
labels={'detectors'})
det3 = SynGauss('det3', motor3, 'motor3', center=-1, Imax=2, sigma=1,
labels={'detectors'})
det4 = Syn2DGauss('det4', motor1, 'motor1', motor2, 'motor2',
center=(0, 0), Imax=1, labels={'detectors'})
det5 = Syn2DGauss('det5', jittery_motor1, 'jittery_motor1', jittery_motor2,
'jittery_motor2', center=(0, 0), Imax=1,
labels={'detectors'})
flyer1 = MockFlyer('flyer1', det, motor, 1, 5, 20)
flyer2 = MockFlyer('flyer2', det, motor, 1, 5, 10)
trivial_flyer = TrivialFlyer()
ab_det = ABDetector(name='det', labels={'detectors'})
# area detector that directly stores image data in Event
direct_img = SynSignal(func=lambda: np.array(np.ones((10, 10))),
name='img', labels={'detectors'})
# area detector that stores data in a file
img = SynSignalWithRegistry(func=lambda: np.array(np.ones((10, 10))),
name='img', labels={'detectors'})
invariant1 = InvariantSignal(func=lambda: 0, name='invariant1',
labels={'detectors'})
invariant2 = InvariantSignal(func=lambda: 0, name='invariant2',
labels={'detectors'})
det_with_conf = DetWithConf(name='det', labels={'detectors'})
det_with_count_time = DetWithCountTime(name='det', labels={'detectors'})
rand = SynPeriodicSignal(name='rand', labels={'detectors'})
rand2 = SynPeriodicSignal(name='rand2', labels={'detectors'})
motor_no_pos = SynAxisNoPosition(name='motor', labels={'motors'})
bool_sig = Signal(value=False, name='bool_sig', labels={'detectors'})
motor_no_hints1 = SynAxisNoHints(name='motor1', labels={'motors'})
motor_no_hints2 = SynAxisNoHints(name='motor2', labels={'motors'})
# Because some of these reference one another we must define them (above)
# before we pack them into a namespace (below).
return SimpleNamespace(
motor=motor,
motor1=motor1,
motor2=motor2,
motor3=motor3,
jittery_motor1=jittery_motor1,
jittery_motor2=jittery_motor2,
noisy_det=noisy_det,
det=det,
identical_det=identical_det,
det1=det1,
det2=det2,
det3=det3,
det4=det4,
det5=det5,
flyer1=flyer1,
flyer2=flyer2,
trivial_flyer=trivial_flyer,
ab_det=ab_det,
direct_img=direct_img,
img=img,
invariant1=invariant1,
invariant2=invariant2,
pseudo3x3=SPseudo3x3(name='pseudo3x3'),
pseudo1x3=SPseudo1x3(name='pseudo1x3'),
sig=Signal(name='sig', value=0),
det_with_conf=det_with_conf,
det_with_count_time=det_with_count_time,
rand=rand,
rand2=rand2,
motor_no_pos=motor_no_pos,
motor_no_hints1=motor_no_hints1,
motor_no_hints2=motor_no_hints2,
bool_sig=bool_sig,
)
# Dump instances of the example hardware generated by hw() into the global
# namespace for convenience and back-compat.
globals().update(hw().__dict__)
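# A minimal sketch (assumed interactive usage; defined but never called) of
# exercising the synthetic hardware bundle without any EPICS infrastructure,
# e.g. when testing bluesky plans.
def _hw_demo():
    sim = hw()
    sim.motor.set(1)       # SynAxis needs no hardware to move
    sim.det.trigger()
    return sim.det.read()  # Gaussian function of sim.motor's position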
|
camera.py
|
import abc
import math
import threading
import time
from collections.abc import Generator
from typing import Optional
import cv2
import numpy as np
class BaseCamera(abc.ABC):
def __init__(
self,
*,
output_fps: float = 25.0,
camera_fps: float = 25.0,
) -> None:
"""Start the background camera thread"""
self._output_fps = output_fps
self._camera_fps = camera_fps
self._event = threading.Event()
self._frame: Optional[np.ndarray] = None
self._thread: Optional[threading.Thread] = None
def start(self) -> None:
self._thread = threading.Thread(target=self._run)
self._thread.daemon = True
self._thread.start()
def frames(self) -> Generator[np.ndarray, None, None]:
"""Return the current camera frame."""
prev = time.monotonic()
while True:
if self._frame is not None:
yield self._frame
current = time.monotonic()
            elapsed = current - prev
            prev = current
            sleep_time = 1.0 / self._output_fps - elapsed
time.sleep(max(sleep_time, 0.0))
@abc.abstractmethod
def _calculate_frame(self) -> Optional[np.ndarray]:
"""Generator that returns frames from the camera."""
raise NotImplementedError()
def _run(self) -> None:
"""Camera background thread."""
prev = time.monotonic()
while True:
frame = self._calculate_frame()
if frame is None:
raise Exception("Error calculating frame")
self._frame = frame
current = time.monotonic()
            elapsed = current - prev
            prev = current
            sleep_time = 1.0 / self._camera_fps - elapsed
time.sleep(max(sleep_time, 0.0))
class OpenCVCamera(BaseCamera):
def __init__(
self,
source: str,
*,
format: str = ".jpg",
output_fps: float = 25.0,
) -> None:
self._format = format
self._camera = cv2.VideoCapture(source)
if not self._camera.isOpened():
raise Exception("Could not start camera.")
camera_fps = 25.0
prop_fps = self._camera.get(cv2.CAP_PROP_FPS)
if prop_fps and prop_fps > 0.0:
camera_fps = prop_fps / 2.0
super().__init__(output_fps=output_fps, camera_fps=camera_fps)
def __del__(self) -> None:
if self._camera is not None:
self._camera.release()
self._camera = None
def _calculate_frame(self) -> Optional[np.ndarray]:
if self._camera is None:
print("Camera is not initialized")
return None
success, unencoded_frame = self._camera.read()
if not success:
print("Cannot read from camera")
return None
success, encoded_frame = cv2.imencode(self._format, unencoded_frame)
if not success:
print("Cannot encode image from camera")
return None
return encoded_frame
class TrafficCounterCamera(OpenCVCamera):
def __init__(
self,
source: str,
*,
format: str = ".jpg",
output_fps: float = 25.0,
) -> None:
self._centroids: list[tuple[int, int]] = []
self._totalcars = 0
self._fgbg = cv2.createBackgroundSubtractorMOG2()
super().__init__(source, format=format, output_fps=output_fps)
def _calculate_frame(self) -> Optional[np.ndarray]:
if self._camera is None:
print("Camera is not initialized")
return None
success, unencoded_frame = self._camera.read()
if not success:
print("Cannot read from camera")
return None
# Only grab the highway
image = unencoded_frame[400:600, 0:1024]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fgmask = self._fgbg.apply(gray)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
dilation = cv2.dilate(opening, kernel)
_, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(
bins, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
hull = [cv2.convexHull(c) for c in contours]
cv2.drawContours(image, hull, -1, (0, 255, 0), 3)
# min area for contours in case a bunch of small noise contours are created
minarea = 1000
# max area for contours, can be quite large for buses
maxarea = 50000
current_centroids = []
for i in range(len(contours)): # cycles through all contours in current frame
if hierarchy[0, i, 3] == -1:
area = cv2.contourArea(contours[i]) # area of contour
if minarea < area < maxarea: # area threshold for contour
# calculating centroids of contours
cnt = contours[i]
M = cv2.moments(cnt)
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
# gets bounding points of contour to create rectangle
# x,y is top left corner and w,h is width and height
x, y, w, h = cv2.boundingRect(cnt)
# creates a rectangle around contour
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Prints centroid text in order to double check later on
cv2.putText(
image,
str(cx) + "," + str(cy),
(cx + 10, cy + 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.3,
(0, 0, 255),
1,
)
cv2.drawMarker(
image,
(cx, cy),
(0, 0, 255),
cv2.MARKER_STAR,
markerSize=5,
thickness=1,
line_type=cv2.LINE_AA,
)
# adds centroids that passed previous criteria to centroid list
current_centroids.append((cx, cy))
max_distance = 100
existing_centroids = list(self._centroids)
for current_centroid in current_centroids:
found = -1
for i, existing_centroid in enumerate(existing_centroids):
if math.dist(current_centroid, existing_centroid) < max_distance:
found = i
break
            if found > -1:
                del existing_centroids[found]
else:
self._totalcars += 1
self._centroids = current_centroids
cv2.putText(
image,
"Cars: " + str(self._totalcars),
(5, 30),
cv2.FONT_HERSHEY_SIMPLEX,
1.0,
(200, 200, 200),
2,
)
success, encoded_frame = cv2.imencode(self._format, image)
if not success:
print("Cannot encode image from camera")
return None
return encoded_frame
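# A minimal sketch (hypothetical video path; only runs when the module is
# executed directly) of how a camera might be driven: start() launches the
# background grab loop and frames() paces JPEG-encoded frames at output_fps
# for a consumer such as an MJPEG stream.
if __name__ == "__main__":
    camera = TrafficCounterCamera("highway.mp4", output_fps=10.0)
    camera.start()
    for i, encoded in enumerate(camera.frames()):
        print("frame", i, len(encoded.tobytes()), "bytes")
        if i >= 9:  # stop after ten frames in this sketch
            break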
|
ui_client.py
|
from _thread import interrupt_main
from threading import Thread
from .pipe import Pipe
class UI:
def __init__(self, pipe):
# type: (Pipe) -> None
self.__pipe = pipe
self.__thread = Thread(target=self.__update)
self.__alive = True
self.__thread.start()
self.interrupt_main_to_close = True
        self.on_change = lambda channel: None  # callback invoked when CMD_CHANGE_GPIO_IN is read from the pipe
def __close(self):
# type: () -> None
self.__alive = False
def change_gpio_out(self, channel, is_high):
# type: (int, bool) -> None
self.__pipe.write_bytes([Pipe.CMD_CHANGE_GPIO_OUT, channel, 1 if is_high else 0])
def bind_gpio_in(self, channel, is_high):
# type: (int, bool) -> None
self.__pipe.write_bytes([Pipe.CMD_BIND_GPIO_IN, channel, 1 if is_high else 0])
def change_gpio_in(self, channel, is_high):
# type: (int, bool) -> None
self.__pipe.write_bytes([Pipe.CMD_CHANGE_GPIO_IN, channel, 1 if is_high else 0])
def cleanup(self, channel):
# type: (int) -> None
self.__pipe.write_bytes([Pipe.CMD_CLEANUP, channel])
def close(self):
self.interrupt_main_to_close = False
self.__pipe.write_bytes([Pipe.CMD_EXIT])
def __update(self):
buf = bytearray()
while self.__alive:
data = self.__pipe.read_bytes()
buf.extend(data)
while len(buf) > 0:
self.__handle_buffer(buf)
self.__pipe.close()
if self.interrupt_main_to_close:
interrupt_main()
def __handle_buffer(self, buf):
# type: (bytearray) -> None
cmd = buf.pop(0)
if cmd == Pipe.CMD_EXIT:
self.__close()
elif cmd == Pipe.CMD_CHANGE_GPIO_IN:
channel = buf.pop(0)
            # TODO: if the channel byte has not arrived yet, push the command back onto the buffer.
self.on_change(channel)
else:
raise AssertionError('Illegal command value (%d)' % cmd)
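# A minimal sketch (assumed calling context; the Pipe setup and the GPIO
# channel numbers are hypothetical) of how the client side might be wired up:
# the UI object forwards GPIO commands over the pipe and invokes on_change
# whenever the other end reports an input change.
def _example_usage(pipe):
    # type: (Pipe) -> UI
    ui = UI(pipe)
    ui.on_change = lambda channel: print('GPIO input %d changed' % channel)
    ui.bind_gpio_in(17, True)      # hypothetical input channel, initially high
    ui.change_gpio_out(18, False)  # hypothetical output channel, driven low
    return ui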
|
thead_lock2.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import threading
import time
num = 0
def run(n):
time.sleep(1)
global num
    lock.acquire()  # acquire the lock
    num += 1
    print '%s\n' % num
    lock.release()  # release the lock
lock = threading.Lock()  # instantiate the lock
for i in range(100):
t = threading.Thread(target=run,args=(i,))
t.start()
'''
Summary: if these threads run without the lock you will see the problem: the
counter does not reach 100 because concurrent increments are lost.
'''
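# A minimal sketch (an assumption about intent; defined but not called) of the
# usual way to observe the final count: join every thread before reading the
# shared counter, so the main thread never sees it mid-update.
def run_and_join(thread_count=100):
    threads = [threading.Thread(target=run, args=(i,)) for i in range(thread_count)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return num  # with the lock around each increment, no updates are lost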
|
test_io.py
|
from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
from tempfile import mkstemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import (ConverterError, ConverterLockError,
ConversionWarning)
from numpy.compat import asbytes, asbytes_nested, bytes, asstr
from nose import SkipTest
from numpy.ma.testutils import (
TestCase, assert_equal, assert_array_equal,
assert_raises, assert_raises_regex, run_module_suite
)
from numpy.testing import assert_warns, assert_, build_err_msg
from numpy.testing.utils import tempdir
class TextIO(BytesIO):
"""Helper IO class.
    Writes encode strings to bytes if needed; reads return bytes.
This makes it easier to emulate files opened in binary mode
without needing to explicitly convert strings to bytes in
setting up the test data.
"""
def __init__(self, s=""):
BytesIO.__init__(self, asbytes(s))
def write(self, s):
BytesIO.write(self, asbytes(s))
def writelines(self, lines):
BytesIO.writelines(self, [asbytes(s) for s in lines])
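# A minimal sketch (illustrative only; not collected as a test) of the helper:
# text written through TextIO is encoded, so reads return bytes exactly as they
# would from a file opened in binary mode.
def _textio_example():
    c = TextIO('1 2\n3 4')
    c.seek(0)
    return c.readlines()  # [b'1 2\n', b'3 4']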
MAJVER, MINVER = sys.version_info[:2]
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
"""This function is available in the datetime module only
from Python >= 2.5.
"""
if sys.version_info[0] >= 3:
return datetime(*time.strptime(s.decode('latin1'), fmt)[:3])
else:
return datetime(*time.strptime(s, fmt)[:3])
class RoundtripTest(object):
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
Function used to save arrays to file.
file_on_disk : bool
If true, store the file on disk, instead of in a
string buffer.
save_kwds : dict
Parameters passed to `save_func`.
load_kwds : dict
Parameters passed to `numpy.load`.
args : tuple of arrays
Arrays stored to file.
"""
save_kwds = kwargs.get('save_kwds', {})
load_kwds = kwargs.get('load_kwds', {})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
target_file = NamedTemporaryFile(delete=False)
load_file = target_file.name
else:
target_file = BytesIO()
load_file = target_file
try:
arr = args
save_func(target_file, *arr, **save_kwds)
target_file.flush()
target_file.seek(0)
if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
target_file.close()
arr_reloaded = np.load(load_file, **load_kwds)
self.arr = arr
self.arr_reloaded = arr_reloaded
finally:
if not isinstance(target_file, BytesIO):
target_file.close()
# holds an open file descriptor so it can't be deleted on win
if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
os.remove(target_file.name)
def check_roundtrips(self, a):
self.roundtrip(a)
self.roundtrip(a, file_on_disk=True)
self.roundtrip(np.asfortranarray(a))
self.roundtrip(np.asfortranarray(a), file_on_disk=True)
if a.shape[0] > 1:
# neither C nor Fortran contiguous for 2D arrays or more
self.roundtrip(np.asfortranarray(a)[1:])
self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)
def test_array(self):
a = np.array([], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], float)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], int)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
self.check_roundtrips(a)
a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
self.check_roundtrips(a)
def test_array_object(self):
if sys.version_info[:2] >= (2, 7):
a = np.array([], object)
self.check_roundtrips(a)
a = np.array([[1, 2], [3, 4]], object)
self.check_roundtrips(a)
# Fails with UnpicklingError: could not find MARK on Python 2.6
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
self.roundtrip(a)
@np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
def test_mmap(self):
a = np.array([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
a = np.asfortranarray([[1, 2.5], [4, 7.3]])
self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
self.check_roundtrips(a)
def test_format_2_0(self):
dt = [(("%d" % i) * 100, float) for i in range(500)]
a = np.ones(1000, dtype=dt)
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', UserWarning)
self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype)
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest, TestCase):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
for n, arr in enumerate(self.arr):
reloaded = self.arr_reloaded['arr_%d' % n]
assert_equal(arr, reloaded)
assert_equal(arr.dtype, reloaded.dtype)
assert_equal(arr.flags.fnc, reloaded.flags.fnc)
finally:
# delete tempfile, must be done here on windows
if self.arr_reloaded.fid:
self.arr_reloaded.fid.close()
os.remove(self.arr_reloaded.fid.name)
@np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
@np.testing.dec.slow
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
with tempdir(prefix="numpy_test_big_arrays_") as tmpdir:
tmp = os.path.join(tmpdir, "file.npz")
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
a = npfile['a']
npfile.close()
def test_multiple_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
self.roundtrip(a, b)
def test_named_arrays(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
c = BytesIO()
np.savez(c, file_a=a, file_b=b)
c.seek(0)
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
# and savez functions in multithreaded environment
def writer(error_list):
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
arr = np.random.randn(500, 500)
try:
np.savez(tmp, arr=arr)
except OSError as err:
error_list.append(err)
finally:
os.remove(tmp)
errors = []
threads = [threading.Thread(target=writer, args=(errors,))
for j in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
if errors:
raise AssertionError(errors)
def test_not_closing_opened_fid(self):
# Test that issue #2178 is fixed:
# verify could seek on 'loaded' file
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
fp = open(tmp, 'rb', 10000)
fp.seek(0)
assert_(not fp.closed)
_ = np.load(fp)['data']
assert_(not fp.closed)
# must not get closed by .load(opened fp)
fp.seek(0)
assert_(not fp.closed)
finally:
fp.close()
os.remove(tmp)
def test_closing_fid(self):
# Test that issue #1517 (too many opened files) remains closed
# It might be a "weak" test since failed to get triggered on
# e.g. Debian sid of 2012 Jul 05 but was reported to
# trigger the failure on Ubuntu 10.04:
# http://projects.scipy.org/numpy/ticket/1517#comment:2
fd, tmp = mkstemp(suffix='.npz')
os.close(fd)
try:
fp = open(tmp, 'wb')
np.savez(fp, data='LOVELY LOAD')
fp.close()
# We need to check if the garbage collector can properly close
# numpy npz file returned by np.load when their reference count
# goes to zero. Python 3 running in debug mode raises a
# ResourceWarning when file closing is left to the garbage
# collector, so we catch the warnings. Because ResourceWarning
# is unknown in Python < 3.x, we take the easy way out and
# catch all warnings.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for i in range(1, 1025):
try:
np.load(tmp)["data"]
except Exception as e:
msg = "Failed to load data from a file: %s" % e
raise AssertionError(msg)
finally:
os.remove(tmp)
def test_closing_zipfile_after_load(self):
# Check that zipfile owns file and can close it.
# This needs to pass a file name to load for the
# test.
with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir:
fd, tmp = mkstemp(suffix='.npz', dir=tmpdir)
os.close(fd)
np.savez(tmp, lab='place holder')
data = np.load(tmp)
fp = data.zip.fp
data.close()
assert_(fp.closed)
class TestSaveTxt(TestCase):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
c = BytesIO()
np.savetxt(c, a, fmt=fmt)
c.seek(0)
assert_equal(c.readlines(),
[asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])
a = np.array([[1, 2], [3, 4]], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_1D(self):
a = np.array([1, 2, 3, 4], int)
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])
def test_format(self):
a = np.array([(1, 2), (3, 4)])
c = BytesIO()
# Sequence of formats
np.savetxt(c, a, fmt=['%02d', '%3.1f'])
c.seek(0)
assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])
# A single multiformat string
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
        # Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
# Bad fmt, should raise a ValueError
c = BytesIO()
assert_raises(ValueError, np.savetxt, c, a, fmt=99)
def test_header_footer(self):
"""
Test the functionality of the header and footer keyword argument.
"""
c = BytesIO()
a = np.array([(1, 2), (3, 4)], dtype=np.int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
# Test the footer keyword argument
c = BytesIO()
np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
# Test the commentstr keyword argument used on the header
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
header=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
# Test the commentstr keyword argument used on the footer
c = BytesIO()
commentstr = '% '
np.savetxt(c, a, fmt='%1d',
footer=test_header_footer, comments=commentstr)
c.seek(0)
assert_equal(c.read(),
asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))
def test_file_roundtrip(self):
f, name = mkstemp()
os.close(f)
try:
a = np.array([(1, 2), (3, 4)])
np.savetxt(name, a)
b = np.loadtxt(name)
assert_array_equal(a, b)
finally:
os.unlink(name)
def test_complex_arrays(self):
ncols = 2
nrows = 2
a = np.zeros((ncols, nrows), dtype=np.complex128)
re = np.pi
im = np.e
a[:] = re + 1.0j * im
# One format only
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e')
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])
# One format for each real and imaginary part
c = BytesIO()
np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])
# One format for each complex number
c = BytesIO()
np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
c.seek(0)
lines = c.readlines()
assert_equal(
lines,
[b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])
def test_custom_writer(self):
class CustomWriter(list):
def write(self, text):
self.extend(text.split(b'\n'))
w = CustomWriter()
a = np.array([(1, 2), (3, 4)])
np.savetxt(w, a)
b = np.loadtxt(w)
assert_array_equal(a, b)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=np.int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
def test_empty_field_after_tab(self):
c = TextIO()
c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
c.seek(0)
dt = {'names': ('x', 'y', 'z', 'comment'),
'formats': ('<i4', '<i4', '<f4', '|S8')}
x = np.loadtxt(c, dtype=dt, delimiter='\t')
a = np.array([b'start ', b' ', b''])
assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
def test_ndmin_keyword(self):
c = TextIO()
c.write('1,2,3\n4,5,6')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=3)
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
a = np.array([[1, 2, 3], [4, 5, 6]])
assert_array_equal(x, a)
d = TextIO()
d.write('0,1,2')
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (1, 3))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
d.seek(0)
x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
e = TextIO()
e.write('0\n1\n2')
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
assert_(x.shape == (3, 1))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
assert_(x.shape == (3,))
e.seek(0)
x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
assert_(x.shape == (3,))
# Test ndmin kw with empty file.
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
f = TextIO()
assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
class Testfromregex(TestCase):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
c.seek(0)
dt = [('num', np.float64), ('val', 'S3')]
x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt)
a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_2(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.int32), ('val', 'S3')]
x = np.fromregex(c, r"(\d+)\s+(...)", dt)
a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
dtype=dt)
assert_array_equal(x, a)
def test_record_3(self):
c = TextIO()
c.write('1312 foo\n1534 bar\n4444 qux')
c.seek(0)
dt = [('num', np.float64)]
x = np.fromregex(c, r"(\d+)\s+...", dt)
a = np.array([(1312,), (1534,), (4444,)], dtype=dt)
assert_array_equal(x, a)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
def test_record(self):
"Test w/ explicit dtype"
data = TextIO('1 2\n3 4')
# data.seek(0)
test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
data = TextIO('M 64.0 75.0\nF 25.0 60.0')
# data.seek(0)
descriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
test = np.ndfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
"Test row skipping"
control = np.array([1, 2, 3, 5], int)
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
test = np.ndfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
test = np.loadtxt(data, skiprows=1, **kwargs)
assert_equal(test, control)
def test_skip_footer(self):
data = ["# %i" % i for i in range(1, 6)]
data.append("A, B, C")
data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
data[-1] = "99,99"
kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
dtype=[(_, float) for _ in "ABC"])
assert_equal(test, ctrl)
def test_skip_footer_with_invalid(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n'
# Footer too small to get rid of all invalid values
assert_raises(ValueError, np.genfromtxt,
TextIO(basestr), skip_footer=1)
# except ValueError:
# pass
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
a = np.genfromtxt(TextIO(basestr), skip_footer=3)
assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
#
basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n'
a = np.genfromtxt(
TextIO(basestr), skip_footer=1, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
a = np.genfromtxt(
TextIO(basestr), skip_footer=3, invalid_raise=False)
assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_names_overwrite(self):
"Test overwriting the names of the dtype"
descriptor = {'names': ('g', 'a', 'w'),
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
test = np.ndfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
assert_equal(test, control)
def test_commented_header(self):
"Check that names can be retrieved even if the line is commented out."
data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
# The # is part of the first name and should be deleted automatically.
test = np.genfromtxt(data, names=True, dtype=None)
ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
assert_equal(test, ctrl)
# Ditto, but we should get rid of the first element
data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
test = np.genfromtxt(data, names=True, dtype=None)
assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_with_usecols(self):
"Test the combination user-defined converters and usecol"
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
assert_equal(test, control)
def test_converters_with_usecols_and_names(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None, converters={'C': lambda s: 2 * int(s)})
control = np.array(('aaaa', 90, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
def test_converters_cornercases(self):
"Test the conversion to datetime."
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
assert_equal(test, control)
def test_converters_cornercases2(self):
"Test the conversion to datetime64."
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
test = np.ndfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
def test_invalid_converter(self):
strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
(b'r' not in x.lower() and x.strip() or 0.0))
strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
(b'%' not in x.lower() and x.strip() or 0.0))
s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
"L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
"D02N03,10/10/2004,R 1,,7,145.55")
kwargs = dict(
converters={2: strip_per, 3: strip_rand}, delimiter=",",
dtype=None)
assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
test = np.ndfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
"Test user_converters w/ explicit (standard) dtype"
data = TextIO('skip,skip,2001-01-01,1.0,skip')
test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
usecols=(2, 3), converters={2: bytes})
control = np.array([('2001-01-01', 1.)],
dtype=[('', '|S10'), ('', float)])
assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
def test_missing_with_tabs(self):
"Test w/ a delimiter tab"
txt = "1\t2\t3\n\t2\t\n1\t\t3"
test = np.genfromtxt(TextIO(txt), delimiter="\t",
usemask=True,)
ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
assert_equal(test.data, ctrl_d)
assert_equal(test.mask, ctrl_m)
def test_usecols(self):
"Test the selection of columns"
# Select 1 column
control = np.array([[1, 2], [3, 4]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
def test_usecols_with_named_columns(self):
"Test usecols with named columns"
ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
data = "1 2 3\n4 5 6"
kwargs = dict(names="a, b, c")
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data),
usecols=('a', 'c'), **kwargs)
assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
data.seek(0)
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.float), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_user_missing_values(self):
data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
test = np.mafromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
dtype=mdtype)
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
test = np.mafromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
test = np.mafromtxt(data, dtype=None, delimiter=',',
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
def test_with_masked_column_various(self):
"Test masked column"
data = TextIO('True 2 3\nFalse 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([(1, 2, 3), (0, 5, 6)],
mask=[(0, 1, 0), (0, 1, 0)],
dtype=[('f0', bool), ('f1', bool), ('f2', int)])
assert_equal(test, control)
def test_invalid_raise(self):
"Test invalid raise"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
#
kwargs = dict(delimiter=",", dtype=None, names=True)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
assert_raises(ValueError, np.ndfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
"Test invalid_raise with usecols"
data = ["1, 1, 1, 1, 1"] * 50
for i in range(5):
data[10 * i] = "2, 2, 2, 2 2"
data.insert(0, "a, b, c, d, e")
mdata = TextIO("\n".join(data))
kwargs = dict(delimiter=",", dtype=None, names=True,
invalid_raise=False)
# XXX: is there a better way to get the return value of the callable in
# assert_warns ?
ret = {}
def f(_ret={}):
_ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
def test_single_dtype_w_explicit_names(self):
"Test single dtype w explicit names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_single_dtype_w_implicit_names(self):
"Test single dtype w implicit names"
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
assert_equal(mtest, ctrl)
def test_easy_structured_dtype(self):
"Test easy structured dtype"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
assert_equal(mtest, ctrl)
def test_autostrip(self):
"Test autostrip"
data = "01/01/2003 , 1.3, abcde"
kwargs = dict(delimiter=",", dtype=None)
mtest = np.ndfromtxt(TextIO(data), **kwargs)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
assert_equal(mtest, ctrl)
def test_replace_space(self):
"Test the 'replace_space' option"
txt = "A.A, B (B), C:C\n1, 2, 3.14"
# Test default: replace ' ' by '_' and delete non-alphanum chars
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None)
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no replace, no delete
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
replace_space='', deletechars='')
ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
# Test: no delete (spaces are replaced by _)
test = np.genfromtxt(TextIO(txt),
delimiter=",", names=True, dtype=None,
deletechars='')
ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
def test_names_with_usecols_bug1636(self):
"Make sure we pick up the right names w/ usecols"
data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
ctrl_names = ("A", "C", "E")
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=(0, 2, 4), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=(int, int, int), delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
#
test = np.genfromtxt(TextIO(data),
dtype=int, delimiter=",",
usecols=("A", "C", "E"), names=True)
assert_equal(test.dtype.names, ctrl_names)
def test_fixed_width_names(self):
"Test fix-width w/ names"
data = " A B C\n 0 1 2.3\n 45 67 9."
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
# Github issue 329 (None was previously being converted to 'None').
test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b'testNonetherestofthedata')
test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
dtype=None, comments=None, delimiter=',')
assert_equal(test[1], b' testNonetherestofthedata')
def test_recfromtxt(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
def test_recfromcsv(self):
#
data = TextIO('A,B\n0,1\n2,3')
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
dtype=[('A', np.int), ('B', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', np.int), ('B', np.int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
#
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
dtype=[('a', np.int), ('b', np.int)])
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
dtype = [('a', np.int), ('b', np.float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2, 3))
if sys.version_info[0] >= 3:
# python 3k is known to fail for '\r'
linesep = ('\n', '\r\n')
else:
linesep = ('\n', '\r\n', '\r')
for sep in linesep:
data = '0 1 2' + sep + '3 4 5'
f, name = mkstemp()
# We can't use NamedTemporaryFile on windows, because we cannot
# reopen the file.
try:
os.write(f, asbytes(data))
assert_array_equal(np.genfromtxt(name), wanted)
finally:
os.close(f)
os.unlink(name)
def test_gft_using_generator(self):
# gft doesn't work with unicode.
def count():
for i in range(10):
yield asbytes("%d" % i)
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
def test_gzip_load():
a = np.random.random((5, 5))
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
np.save(f, a)
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.load(f), a)
def test_gzip_loadtxt():
    # Thanks to another Windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
# which is then read from by the loadtxt function
s = BytesIO()
g = gzip.GzipFile(fileobj=s, mode='w')
g.write(b'1 2 3\n')
g.close()
s.seek(0)
f, name = mkstemp(suffix='.gz')
try:
os.write(f, s.read())
s.close()
assert_array_equal(np.loadtxt(name), [1, 2, 3])
finally:
os.close(f)
os.unlink(name)
def test_gzip_loadtxt_from_string():
s = BytesIO()
f = gzip.GzipFile(fileobj=s, mode="w")
f.write(b'1 2 3\n')
f.close()
s.seek(0)
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
def test_npzfile_dict():
s = BytesIO()
x = np.zeros((3, 3))
y = np.zeros((3, 3))
np.savez(s, x=x, y=y)
s.seek(0)
z = np.load(s)
assert_('x' in z)
assert_('y' in z)
assert_('x' in z.keys())
assert_('y' in z.keys())
for f, a in z.items():
assert_(f in ['x', 'y'])
assert_equal(a.shape, (3, 3))
assert_(len(z.items()) == 2)
for f in z:
assert_(f in ['x', 'y'])
assert_('x' in z.keys())
def test_load_refcount():
# Check that objects returned by np.load are directly freed based on
# their refcount, rather than needing the gc to collect them.
f = BytesIO()
np.savez(f, [1, 2, 3])
f.seek(0)
gc.collect()
n_before = len(gc.get_objects())
np.load(f)
n_after = len(gc.get_objects())
assert_equal(n_before, n_after)
if __name__ == "__main__":
run_module_suite()
|
messaging.py
|
import copy
import datetime
import logging
import re
import salt.exceptions
import salt.utils.event
import threading
import threading_more
import time
import urlparse
import uuid
from salt_more import SuperiorCommandExecutionError
from timeit import default_timer as timer
log = logging.getLogger(__name__)
class MessageProcessor(object):
"""
Native hooks:
- 'worker': TODO
- 'workflow': TODO
Built-in workers:
- 'shared': Workflow is performed by the current message processor thread.
- 'dedicated': Workflow is performed by a dedicated thread.
Built-in workflows and their additional hooks:
- 'simple': <handler> --> [trigger] --> [filter] --> [returner]
    - 'extended': [validator] (--> [returner]) --> <handler> --> [converter] --> [trigger] --> [filter] --> [enricher] --> [returner]
- 'manage': Special workflow for management stuff. See implementation for details.
NOTES:
- All hook functions registered using the hook decorator are synchronized before invocation to ensure thread safety.
- If you add your own hook methods by inheriting from this class you are responsible for thread synchronization.
"""
def __init__(self, default_hooks={}):
self._default_hooks = default_hooks
self._hook_funcs = {} # Index of all registered hook functions
self._hook_lock = threading.RLock() # Used to synchronize hook function calls
self._measure_stats = False
self.worker_threads = threading_more.ThreadRegistry() # Keeps track of all active workers
# Set default hooks that will be used if none specified
if not "worker" in self._default_hooks:
self._default_hooks["worker"] = "shared"
if not "workflow" in self._default_hooks:
self._default_hooks["workflow"] = "simple"
@property
def measure_stats(self):
return self._measure_stats
@measure_stats.setter
def measure_stats(self, value):
self._measure_stats = value
def register_hook(self, synchronize=True):
"""
Decorator to register hook functions for this message processor.
Args:
synchronize (bool): Enables thread synchronization for entire hook function.
"""
def decorator(func):
ret_func = func
# Wrap in synchronizer if requested
if synchronize:
ret_func = self._synchronize_wrapper(self._hook_lock, func)
# Add function to hook registry
name = func.__name__
self._hook_funcs[name] = ret_func
if log.isEnabledFor(logging.DEBUG):
log.debug("Registered hook function '%s'", name)
return ret_func
return decorator
def add_hook(self, name, kind, func, synchronize=True):
"""
Add hook function manually to this message processor.
"""
# Wrap in synchronizer if requested
if synchronize:
func = self._synchronize_wrapper(self._hook_lock, func)
self._hook_funcs["{:}_{:}".format(name, kind)] = func
def process(self, message):
"""
Process a single message.
"""
# Find worker and use it
func, settings = self._get_hook_for(message, "worker", parse_settings=True)
result = func(message, **settings)
return result
#region Available workers
def shared_worker(self, message, **settings):
"""
Run workflow in current thread.
"""
found, result = self._call_hook_for(message, "workflow", message)
return result
def dedicated_worker(self, message, **settings):
"""
Run workflow in a dedicated thread.
"""
# Check if we need to dequeue message from an existing worker thread
if "dequeue" in settings:
threads = self.worker_threads.do_for_all_by(settings["dequeue"],
lambda t: t.context["messages"].remove(message))
return {
"dequeued": [t.name for t in threads]
}
# Check if we need to enqueue message to an existing worker thread
if "enqueue" in settings:
threads = self.worker_threads.do_for_all_by(settings["enqueue"],
lambda t: t.context["messages"].append(message))
return {
"enqueued": [t.name for t in threads]
}
        # By default, exceptions will NOT kill the worker thread
suppress_exceptions = settings.pop("suppress_exceptions", True)
        # Kill the worker thread after a successful run with no warnings or exceptions
kill_upon_success = settings.pop("kill_upon_success", False)
# Perform entire job iteration transactionally
transactional = settings.pop("transactional", False)
# Prepare function that performs actual work
def do_work(thread, context):
success = True
# Loop through all messages found in thread context
for message in list(context["messages"]): # Copy list to allow changes while looping
try:
# Run workflow
self._call_hook_for(message, "workflow", message)
except Warning as wa:
success = False
# Register time of last warning in context
context["last_warning"] = datetime.datetime.utcnow().isoformat()
# Also register all distinct warning messages in context
msg = str(wa)
context.setdefault("distinct_warnings", {}).setdefault(msg, 0)
context["distinct_warnings"][msg] += 1
# Only allow recurring warnings to be logged every minute
if context["distinct_warnings"][msg] > 3 \
and timer() - getattr(thread, "warning_log_timer", 0) < 60:
return
setattr(thread, "warning_log_timer", timer())
# Go ahead and log the warning
if context["distinct_warnings"][msg] > 1:
log.info("Recurring warning ({:} times) in worker thread '{:}': {:}".format(context["distinct_warnings"][msg], thread.name, wa))
else:
log.info("Warning in worker thread '{:}': {:}".format(thread.name, wa))
except Exception as ex:
success = False
# Register time of last error in context
context["last_error"] = datetime.datetime.utcnow().isoformat()
# Also register all distinct error messages in context
msg = str(ex)
context.setdefault("distinct_errors", {}).setdefault(msg, 0)
context["distinct_errors"][msg] += 1
# Only allow recurring exceptions to be logged every minute
if suppress_exceptions and context["distinct_errors"][msg] > 3 \
and timer() - getattr(thread, "exception_log_timer", 0) < 60:
return
setattr(thread, "exception_log_timer", timer())
# Go ahead and log the exception
if context["distinct_errors"][msg] > 1:
log.exception("Recurring exception ({:} times) in worker thread '{:}' while running workflow for message: {:}".format(context["distinct_errors"][msg], thread.name, message))
else:
log.exception("Exception in worker thread '{:}' while running workflow for message: {:}".format(thread.name, message))
# Finally suppress or propagate the exception
if suppress_exceptions:
if transactional:
log.info("Suppressing prior exception in worker thread '{:}' and skips any following work".format(thread.name))
break
else:
log.info("Suppressing prior exception in worker thread '{:}' and continues as normal".format(thread.name))
else:
raise
# Clear any warnings and errors on success
if success:
context.pop("distinct_warnings", None)
context.pop("distinct_errors", None)
if kill_upon_success and success:
thread.kill()
if log.isEnabledFor(logging.DEBUG):
log.debug("Killed worker thread '{:}' upon successful run".format(thread.name))
# Start immediately is default
start = settings.pop("start", True)
# Add new worker thread
thread = threading_more.WorkerThread(
target=self._synchronize_wrapper(self._hook_lock, do_work) if transactional else do_work,
context={"messages": [message] if message else []},
registry=self.worker_threads, # Registers thread in registry
**settings) # Pass additional settings
if start:
thread.start()
return {
"started": thread.name
}
return {
"created": thread.name
}
#endregion
#region Available workflows
def simple_workflow(self, message):
"""
        Simple message processing flow and available hooks:
<handler> --> [trigger] --> [filter] --> [returner]
"""
args = message.get("args", [])
kwargs = message.get("kwargs", {})
result = None
try:
# Call handler hook
_, result = self._call_hook_for(message, "handler", *args, **kwargs)
except Exception as ex:
result = ex
raise
finally:
# Always call trigger hook(s), also on error or empty result
try:
self._call_hooks_for(message, "trigger", result)
except:
pass # Already logged
# Call filter hook (chain) if there is a result
if result:
found, filtered_result = self._call_hook_chain_for(message, "filter", result)
if found:
result = filtered_result
# Call returner hook(s) if there is a result
if result:
self._call_hooks_for(message, "returner", message, result)
return result
def extended_workflow(self, message):
"""
Extended message processing flow and available hooks:
        [validator] --> <handler> --> [converter] --> [trigger] --> [filter] --> [enricher] --> [returner]
"""
args = message.get("args", [])
kwargs = message.get("kwargs", {})
# Call validator hook (chain)
found, error = self._call_hook_chain_for(message, "validator", *args, **kwargs)
if found and error:
raise Exception(error)
result = None
try:
# Call handler hook
_, result = self._call_hook_for(message, "handler", *args, **kwargs)
# Call converter hook (chain) if there is a result
if result:
found, converted_result = self._call_hook_chain_for(message, "converter", result)
if found:
result = converted_result
except Exception as ex:
result = ex
raise
finally:
# Always call trigger hook(s), also on error or empty result
try:
self._call_hooks_for(message, "trigger", result)
except:
pass # Already logged
# Call filter hook (chain) if there is a result
if result:
found, filtered_result = self._call_hook_chain_for(message, "filter", result)
if found:
result = filtered_result
# Call enricher hook (chain) if there is a result
if result:
found, enriched_result = self._call_hook_chain_for(message, "enricher", result)
if found:
result = enriched_result
# Call returner hook(s) if there is a result
        if result: # TODO HN: What happens if one returner fails when there are multiple?
self._call_hooks_for(message, "returner", message, result)
return result
def manage_workflow(self, message):
"""
Administration workflow to query and manage this processor instance.
Supported commands:
- hook list|call <name> [argument]... [<key>=<value>]...
- worker list|show|create|start|pause|resume|kill <name> [<key>=<value>]...
- run <key>=<value>...
"""
args = message.get("args", [])
kwargs = message.get("kwargs", {})
if len(args) > 1 and args[0] == "hook":
if args[1] == "list":
return {
"values": [h for h in self._hook_funcs]
}
elif args[1] == "call":
return self._get_func(args[2])(*args[3:], **kwargs)
elif len(args) > 1 and args[0] == "worker":
if args[1] == "list":
return {
"values": [
t.name for t in self.worker_threads.find_all_by(args[2] \
if len(args) > 2 else "*")
]
}
elif args[1] == "show":
threads = self.worker_threads.find_all_by(args[2] if len(args) > 2 else "*")
return {
"value": {t.name: t.context for t in threads}
}
elif args[1] == "create":
return self.dedicated_worker(None, **kwargs)
elif args[1] == "start":
threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.start(**kwargs))
return {
"values": [t.name for t in threads]
}
elif args[1] == "pause":
threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.pause(**kwargs))
return {
"values": [t.name for t in threads]
}
elif args[1] == "resume":
threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.resume(**kwargs))
return {
"values": [t.name for t in threads]
}
elif args[1] == "kill":
threads = self.worker_threads.do_for_all_by(args[2], lambda t: t.kill(**kwargs))
return {
"values": [t.name for t in threads]
}
elif len(args) > 0 and args[0] == "run":
msg = kwargs
return self.process(msg)
raise Exception("Invalid or unknown command")
#endregion
#region Private helpers
def _call_hook_for(self, message, kind, *args, **kwargs):
func = self._get_hook_for(message, kind)
if func:
return True, func(*args, **kwargs)
return False, None
def _call_hooks_for(self, message, kind, *args, **kwargs):
errors = []
funcs = self._get_hooks_for(message, kind)
for func in funcs:
try:
func(*args, **kwargs)
except Exception as ex:
log.exception("Error when calling {:} hook for message: {:}".format(kind, message))
errors.append(ex)
# Raise if error(s)
if errors:
raise Exception("Failed to call {:}/{:} {:} hook(s) for message: {:}".format(len(errors), len(funcs), kind, message))
def _call_hook_chain_for(self, message, kind, *args, **kwargs):
ret = (False, None)
for func in self._get_hooks_for(message, kind):
res = func(*args, **kwargs)
ret = (True, res)
if res != None:
break
return ret
def _get_hook_for(self, message, kind, parse_settings=False):
url = self._get_hook_url_for(message, kind)
if not url:
return
name = url
        # Parse settings from url if requested
if parse_settings:
name, settings = self._parse_hook_url(url)
# Get hook function by name
func = self._get_func("{:s}_{:s}".format(name, kind))
# Wrap hook function in order to measure statistics
if self._measure_stats:
func = self._stats_wrapper_for(message, kind, func)
if parse_settings:
return (func, settings)
else:
return func
def _get_hooks_for(self, message, kind):
ret = []
url = self._get_hook_url_for(message, kind)
if not url:
return ret
for name in url.split(","):
# Get hook function by name
func = self._get_func("{:s}_{:s}".format(name, kind))
# Wrap hook function in order to measure statistics
if self._measure_stats:
func = self._stats_wrapper_for(message, kind, func)
ret.append(func)
return ret
def _stats_wrapper_for(self, message, kind, func):
def stats_wrapper(*args, **kwargs):
start = timer()
try:
return func(*args, **kwargs)
finally:
duration = timer() - start
stats = message.setdefault("_stats", {}).setdefault(kind, {
"duration": {
"acc": 0.0,
"avg": 0.0,
"min": -1.0,
"max": -1.0
},
"count": 0
})
stats["count"] += 1
stats["duration"]["acc"] += duration
stats["duration"]["avg"] = stats["duration"]["acc"] / stats["count"]
if duration < stats["duration"]["min"] or stats["duration"]["min"] < 0:
stats["duration"]["min"] = duration
if duration > stats["duration"]["max"]:
stats["duration"]["max"] = duration
return stats_wrapper
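    # Illustrative note (values below are hypothetical): when measure_stats is enabled,
    # every wrapped hook call accumulates timing data directly on the message, e.g. after
    # two handler calls one might find:
    #
    #   message["_stats"]["handler"] == {
    #       "count": 2,
    #       "duration": {"acc": 0.0123, "avg": 0.00615, "min": 0.0051, "max": 0.0072}
    #   }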
def _parse_hook_url(self, url):
u = urlparse.urlparse(url)
name = u.path
settings = {}
if u.query:
qs = urlparse.parse_qs(u.query, strict_parsing=True)
for k, v in qs.iteritems():
# Convert into appropriate types using eval (integers, decimals and booleans)
v = [eval(e) if re.match("^(?:[-+]?\d*\.?\d*|True|False)$", e) else e for e in v]
if len(v) == 1:
settings[k] = v[0]
else:
settings[k] = v
return (name, settings)
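    # Illustrative examples (hook URLs are hypothetical): settings are parsed from the
    # query string, and values matching the pattern above are eval'ed into ints/floats/booleans.
    #
    #   self._parse_hook_url("dedicated?name=poller&interval=5&transactional=True")
    #   # -> ("dedicated", {"name": "poller", "interval": 5, "transactional": True})
    #
    #   self._parse_hook_url("simple")
    #   # -> ("simple", {})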
def _get_hook_url_for(self, message, kind):
return message.get(kind, self._default_hooks.get(kind, None))
def _get_func(self, name):
if name in self._hook_funcs:
return self._hook_funcs[name]
elif hasattr(self, name):
return getattr(self, name)
else:
raise Exception("No function found for hook '{:}'".format(name))
def _synchronize_wrapper(self, lock, func):
def synchronizer(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return synchronizer
#endregion
class EventDrivenMessageProcessor(MessageProcessor):
def __init__(self, namespace, context={}, default_hooks={}):
MessageProcessor.__init__(self, default_hooks)
self._namespace = namespace
self._context = context
self._tag_regex = re.compile("^{:s}/req/(?P<id>.+)$".format(namespace))
self._event_matchers = []
self._bus_lock = threading.RLock() # Used to synchronize event bus function calls
self._outgoing_event_filters = {}
self._reactors = []
def init(self, __salt__, __opts__, hooks=[], workers=[], reactors=[]):
"""
Initialize this instance.
"""
# Dedicated event bus handle for receiving events
self._incoming_bus = salt.utils.event.get_event("minion",
opts=__opts__,
transport=__opts__["transport"],
listen=True)
# Dedicated event bus handle for sending events
self._outgoing_bus = salt.utils.event.get_event("minion",
opts=__opts__,
transport=__opts__["transport"],
listen=False)
# Register matcher for event processor
self.register_event_matcher(
self._tag_regex.pattern,
self.process_event,
match_type="regex")
# Add given workflow hooks
for hook in hooks or []:
try:
# Special handling of returners
if hook["kind"] == "returner":
# Load Salt returner function
returners = salt.loader.returners(__opts__, __salt__) # TODO: This can be cached
returner_func = returners[hook["func"]]
# Wrap returner function to add defined args and kwargs
def returner_wrapper(message, result, hook=hook, returner_func=returner_func):
# Skip empty results
if not result:
return
args = hook.get("args", [])
kwargs = hook.get("kwargs", {})
# Automatically set namespace as kind for data results
if not args and returner_func.__name__ == "returner_data":
args.append(self._namespace)
return returner_func(result, *args, **kwargs)
self.add_hook(hook["name"], hook["kind"], returner_wrapper, synchronize=hook.get("lock", False))
else:
self.add_hook(hook["name"], hook["kind"], __salt__[hook["func"]], synchronize=hook.get("lock", False))
except Exception:
log.exception("Failed to add hook: {:}".format(hook))
# Add given workers
for worker in workers or []:
messages = worker.pop("messages")
self.dedicated_worker(None, start=False, **worker)
# Enqueue all messages to worker
for message in messages:
self.dedicated_worker(message, enqueue=worker["name"])
# Add given reactors
for reactor in reactors or []:
# Define function to handle events when matched
def on_event(event, match=None, reactor=reactor):
                # Check if any conditions are defined
conditions = reactor.get("conditions", [])
if "condition" in reactor:
conditions.append(reactor["condition"])
for index, condition in enumerate(conditions, 1):
if keyword_resolve(condition, keywords={"event": event, "match": match, "context": self._context}):
log.info("Event meets condition #{:} '{:}': {:}".format(index, condition, event))
else:
return
# Process all action messages
actions = reactor.get("actions", [])
if "action" in reactor:
actions.append(reactor["action"])
for index, message in enumerate(actions, 1):
# Check if keyword resolving is enabled
if reactor.get("keyword_resolve", False):
resolved_message = keyword_resolve(copy.deepcopy(message), keywords={"event": event, "match": match, "context": self._context})
if log.isEnabledFor(logging.DEBUG):
log.debug("Keyword resolved message: {:}".format(resolved_message))
# TODO: Figure out if we can improve performance by processing each message in a dedicated worker thread or process?
res = self.process(resolved_message)
else:
res = self.process(message)
if index < len(actions) and reactor.get("chain_conditionally", False):
if not res or isinstance(res, dict) and not res.get("result", True):
if log.isEnabledFor(logging.DEBUG):
log.debug("Breaking action chain after message #{:} '{:}' because of result '{:}'".format(index, message, result))
break
match_type = None
if "regex" in reactor:
match_type = "regex"
elif "startswith" in reactor:
match_type = "startswith"
elif "endswith" in reactor:
match_type = "endswith"
elif "fnmatch" in reactor:
match_type = "fnmatch"
else:
log.error("No valid match type found for reactor: {:}".format(reactor))
continue # Skip reactor
# Register event matcher using above function
self.register_reactor(reactor, on_event, match_type=match_type)
def _custom_match_tag_regex(self, event_tag, search_tag):
return self._incoming_bus.cache_regex.get(search_tag).search(event_tag)
def register_reactor(self, reactor, func, match_type="startswith"):
"""
        Register a reactor and add it as an event matcher. This is a wrapper around register_event_matcher.
"""
self._reactors.append(reactor)
self.register_event_matcher(reactor[match_type], func, match_type=match_type)
def register_event_matcher(self, tag, func, match_type="startswith"):
"""
Register additional event matchers to catch other events.
"""
em = {
"tag": tag,
"match_type": match_type,
"match_func": self._custom_match_tag_regex if match_type == "regex" else self._incoming_bus._get_match_func(match_type),
"func": func,
}
self._event_matchers.append(em)
def manage_workflow(self, message):
"""
Administration workflow to query and manage this processor instance.
Supported commands:
- reactor list|show <name>
"""
args = message.get('args', [])
kwargs = message.get('kwargs', {})
if len(args) > 1 and args[0] == 'reactor':
if args[1] == 'list':
return {
"values": [r['name'] for r in self._reactors],
}
elif args[1] == 'show':
if len(args) > 2 and args[2] != '*':
reactors = [r for r in self._reactors if args[2] in r['name']]
else:
reactors = [r for r in self._reactors]
return {
"value": {r['name']: r for r in reactors}
}
else:
return super(EventDrivenMessageProcessor, self).manage_workflow(message)
def run(self):
"""
Blocking method that processes all received events.
"""
# Ensure all worker threads with auto start enabled are started
threads = self.worker_threads.do_for_all(lambda t: t.auto_start, lambda t: t.start())
if threads:
log.info("Starting {:d} worker thread(s): {:s}".format(len(threads), ", ".join([t.name for t in threads])))
# Listen for incoming messages
if log.isEnabledFor(logging.DEBUG):
log.debug("Listening for incoming events using %d registered event matcher(s)", len(self._event_matchers))
try:
for event in self._incoming_bus.iter_events(full=True, auto_reconnect=True):
if not event:
log.warn("Skipping empty event")
continue
try:
for matcher in self._event_matchers:
match = matcher["match_func"](event["tag"], matcher["tag"])
if not match:
continue
if log.isEnabledFor(logging.DEBUG):
log.debug("Matched event: %s", repr(event))
matcher["func"](event, match=match)
except Exception:
log.exception("Failed to process received event: {:}".format(event))
finally:
# Ensure all worker threads are killed
threads = self.worker_threads.do_for_all_by("*", lambda t: t.kill(), force_wildcard=True)
if threads:
log.info("Killing all worker thread(s): {:s}".format(", ".join([t.name for t in threads])))
def process_event(self, event, **kwargs):
"""
Process a received event.
"""
res = None
try:
# Extract message from event
message = event["data"]
# Add reference to original event tag
# (used to get correlation id when/if sending back reply)
message["_event_tag"] = event["tag"]
# Process message
res = self.process(message)
except Exception as ex:
log.exception("Exception while processing event: {:}".format(event))
res = {
"error": str(ex)
}
finally:
# Send back reply event
if res != None:
self.send_reply_event_for(message, res)
else:
log.warn("No reply to send back for event: {:}".format(event))
def trigger_event(self, data, tag, skip_duplicates_filter=None):
"""
Trigger an outgoing event.
"""
# Check for duplicates to skip
if skip_duplicates_filter != None:
skip_duplicates_filter = "dupl:{:}".format(skip_duplicates_filter)
if (tag, data) == self._outgoing_event_filters.get(skip_duplicates_filter, None):
if log.isEnabledFor(logging.DEBUG):
log.debug("Skipping duplicate event with tag '{:s}': {:}".format(tag, data))
return
log.info("Triggering event '{:s}': {:}".format(tag, data))
with self._bus_lock: # Synchronize just to be safe
self._outgoing_bus.fire_event(data.copy(), tag)
# Register last event for duplicate filter
if skip_duplicates_filter != None:
self._outgoing_event_filters[skip_duplicates_filter] = (tag, data)
def subscribe_to_events(self, tag, match_type="startswith"):
"""
Decorator to let a function subscribe to events matching specified tag pattern.
"""
def decorator(func):
self._event_matchers.append({
"tag": tag,
"match_type": match_type,
"match_func": None,
"func": func,
})
return func
return decorator
def send_reply_event_for(self, message, data):
"""
Send back reply data for a received message.
"""
# Extract correlation id from original event
match = self._tag_regex.match(message["_event_tag"])
groups = match.groupdict()
tag = "{:s}/res/{:s}".format(self._namespace, groups["id"])
if log.isEnabledFor(logging.DEBUG):
log.debug("Sending reply mesage with tag '{:s}': {:}".format(tag, data))
# Send reply event
with self._bus_lock: # Synchronize just to be safe
self._outgoing_bus.fire_event(data, tag)
#region Built-in hooks
def reply_returner(self, message, result):
self.send_reply_event_for(message, result)
#endregion
class EventDrivenMessageClient(object):
def __init__(self, namespace, default_timeout=30):
self._namespace = namespace
self._default_timeout = default_timeout
def init(self, opts):
self._opts = opts
def send_sync(self, message, timeout=None):
if timeout == None:
timeout = message.get("timeout", self._default_timeout)
correlation_id = uuid.uuid4()
req_tag = "{:s}/req/{:s}".format(self._namespace, correlation_id)
res_tag = "{:s}/res/{:s}".format(self._namespace, correlation_id)
bus = salt.utils.event.get_event("minion",
opts=self._opts,
transport=self._opts["transport"],
listen=True)
try:
bus.subscribe(tag=res_tag, match_type="startswith")
if log.isEnabledFor(logging.DEBUG):
log.debug("Sending request message with tag '%s': %s", req_tag, message)
bus.fire_event(message, req_tag)
reply = self._recv_reply(bus, timeout=timeout, tag=res_tag, match_type="startswith")
return reply
finally:
try:
bus.destroy()
except:
log.exception("Unable to destroy event bus")
def _recv_reply(self, bus, timeout=None, **kwargs):
# Determine timeout
timeout = timeout or self._default_timeout
# Wait for message until timeout
message = bus.get_event(wait=timeout, **kwargs)
if not message:
log.warn("No reply message with tag '%s' received within timeout of %d secs", kwargs.get("tag", None), timeout)
raise salt.exceptions.CommandExecutionError(
"No reply message received within timeout of {:d} secs - please try again and maybe increase timeout value".format(timeout))
# Check for error
if "error" in message:
if isinstance(message["error"], dict):
raise SuperiorCommandExecutionError(str(message["error"]), data=message["error"])
raise salt.exceptions.CommandExecutionError(message["error"])
return message
def msg_pack(*args, **kwargs):
"""
Helper method to pack message into dict.
"""
msg = {}
if args:
msg["args"] = args
if kwargs:
for k, v in kwargs.iteritems():
if k.startswith("__"): # Filter out Salt params (__pub_*)
continue
if k.startswith("_"):
msg[k.lstrip("_")] = v
else:
if not "kwargs" in msg:
msg["kwargs"] = {}
msg["kwargs"][k] = v
return msg
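# Illustrative example (values are hypothetical): positional args become "args", keys with a
# single leading underscore are promoted to top-level message fields, the rest are nested
# under "kwargs", and Salt's __pub_* parameters are dropped:
#
#   msg_pack("hook", "list", _timeout=10, pretty=True, __pub_fun="ignored")
#   # -> {"args": ("hook", "list"), "timeout": 10, "kwargs": {"pretty": True}}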
def keyword_resolve(data, keywords={}, symbol="$"):
"""
Helper method to resolve keywords in a data structure.
"""
if isinstance(data, (list, tuple, set)):
for idx, val in enumerate(data):
data[idx] = keyword_resolve(val, keywords)
if isinstance(data, dict):
res = {}
for key, val in data.iteritems():
res[keyword_resolve(key, keywords)] = keyword_resolve(val, keywords)
data = res
elif isinstance(data, basestring) and symbol in data:
# Replace keywords in data
for key in keywords:
data = data.replace("{:s}{:s}".format(symbol, key), "__{:s}__".format(key))
return eval(data, {"__{:s}__".format(key): val for key, val in keywords.iteritems()})
return data
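# Illustrative example (data is hypothetical): '$'-prefixed keywords are substituted and the
# resulting expression is evaluated against the supplied keyword values:
#
#   keyword_resolve("$event['data']['value'] > 10", keywords={"event": {"data": {"value": 42}}})
#   # -> True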
def extract_error_from(result):
"""
Helper function to extract error from a result.
"""
if not result:
log.error("Cannot attempt to extract error from an empty result: {:}".format(result))
return
return result if isinstance(result, Exception) else result.get("error", None) if isinstance(result, dict) else result
def filter_out_unchanged(result, context={}, kind=None):
"""
Helper function to filter out unchanged results recursively based on their specified types.
"""
# Build qualified type string for the result
kind = ".".join(filter(None, [kind, result.get("_type", None)]))
    # Loop through all keys in the result and build an entry with the significant changing values
entry = {}
for key, val in result.iteritems():
# Skip all meta/hidden
if key.startswith("_"):
continue
# Dive into list in an attempt to filter it
if isinstance(val, list):
vals = []
for res in val:
# Recursive handling of dictionary values
if isinstance(res, dict):
sub_res = filter_out_unchanged(res, context=context, kind=kind)
if sub_res:
vals.append(sub_res)
# Special handling of primitive values - they are always added
else:
vals.append(res)
# Ensure primitive values are also added to entry
entry[key] = vals
# Set filtered values on result
result[key] = vals
# Ordinary primitive or dictionary value
else:
entry[key] = val
    # Do we have a type and an entry with one or more significant changing values?
if kind and entry:
# Compare if entry equals content cached in context
if context.get(kind, None) == entry:
# Skip entry when equal to cached
return
# Otherwise update cache with recent content
else:
context[kind] = entry
return result
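# Illustrative example (result/context are hypothetical): repeated identical results for the
# same "_type" are filtered out until a significant value changes again:
#
#   ctx = {}
#   filter_out_unchanged({"_type": "temp", "value": 21}, context=ctx)  # -> returned, entry cached
#   filter_out_unchanged({"_type": "temp", "value": 21}, context=ctx)  # -> None, unchanged
#   filter_out_unchanged({"_type": "temp", "value": 23}, context=ctx)  # -> returned, cache updated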
|
client.py
|
# Python 3.6.2
import socket
import struct
import threading
import datetime
import os
import random
import sys
import secrets
from time import sleep
from hashlib import sha3_224
# Switch to the directory containing client.py
this_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(this_dir)
# Insert the server, misc, and src/inter/* directories to PATH so we can use modules like inject, vote, discover, etc.
sys.path.insert(0, (os.path.abspath('../server')))
sys.path.insert(0, (os.path.abspath('../misc')))
sys.path.insert(0, (os.path.abspath('../inter/')))
sys.path.insert(0, (os.path.abspath('../inter/modules')))
# Imports from PATH
import primitives
import traceback
# Immutable state; Constant node parameters set upon initialization and/or configuration
_original_path = os.path.dirname(os.path.realpath(__file__))
no_prop = "ffffffffffffffff"
ring_prop = "eeeeeeeeeeeeeeee"
localhost = socket.socket()
localhost.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Nobody likes TIME_WAIT-ing. Add SO_REUSEADDR.
nodeConfig = [3705, False, "Debug", "Client", None, None, _original_path, 0, "", "", "", localhost] # 13 bytes + context
# Mutable state; Write with writeState(), Read with readState(). Contains default values until changed
nodeState = [(), [], False, False, [], "", [], 0, [], [], False, False, False]
# Thread locks
nodestate_lock = threading.Lock()
command_execution_lock = threading.Lock()
fileIO_lock = threading.Lock()
send_lock = threading.Lock()
respond_lock = threading.Lock()
os.chdir(_original_path)
Primitives = primitives.Primitives("Client", "")
class Client:
@staticmethod
def lock(lock, name=None):
if name and type(name) == str:
Primitives.log("Locking: "+name, in_log_level="Info")
lock.acquire()
@staticmethod
def release(lock, name=None):
# if name and type(name) == str:
# print("releasing "+name)
lock.release()
def overwrite_nodestate(self, in_nodestate, write_nodeConfig=False):
global nodestate_lock
self.lock(nodestate_lock, name="nodeState")
global nodeState, nodeConfig
if not write_nodeConfig:
nodeState = in_nodestate
else:
nodeConfig = in_nodestate
self.release(nodestate_lock, name="nodeState")
def write_nodestate(self, in_nodestate, index, value, void=True):
global nodestate_lock
self.lock(nodestate_lock, name="nodeState")
global nodeState, nodeConfig
in_nodestate[index] = value
if void:
if in_nodestate == nodeConfig:
print("Setting nodeConfig["+str(index)+"]"+" to "+str(value))
nodeConfig = list(in_nodestate)
else:
print("Setting nodeState["+str(index)+"]"+" to "+str(value))
nodeState = list(in_nodestate)
self.release(nodestate_lock, name="nodeState")
if not void:
self.release(nodestate_lock, name="nodeState")
return in_nodestate
def read_nodestate(self, index, in_nodestate=None):
global nodeState, nodestate_lock
self.lock(nodestate_lock, name="nodeState")
if not in_nodestate:
current_nodeState = nodeState
else:
current_nodeState = in_nodestate
self.release(nodestate_lock, name="nodeState")
return current_nodeState[index]
def read_nodeConfig(self, index):
global nodeConfig
return self.read_nodestate(index, in_nodestate=nodeConfig)
def write_nodeConfig(self, _nodeConfig, index, value):
return self.write_nodestate(nodeConfig, index, value)
@staticmethod
def prepare(message, salt=True):
""" Assign unique hashes to messages ready for transport.
Returns (new hashed message) -> str """
out = ""
# Assign a timestamp
if salt:
timestamp = str(datetime.datetime.utcnow())
stamped_message = timestamp + message
out += stamped_message
else:
out += message
# Generate the hash and append the message to it
sig = sha3_224(out.encode()).hexdigest()[:16]
out = sig + ":" + message
return out
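    # Illustrative example (hash value is a placeholder, not a real digest): with salt=True
    # the timestamp only influences the signature; the payload after the ':' delimiter is
    # left untouched:
    #
    #   Client.prepare("echo")
    #   # -> "<16 hex chars of sha3_224(timestamp + 'echo')>:echo"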
def lookup_socket(self, address, ext_net_tuple=None): # TODO: optimize me
"""Brute force search the network tuple for a socket associated with a given address.
Return socket object if found.
Returns 0(-> int) if not found
"""
if ext_net_tuple:
net_tuple = ext_net_tuple
else:
net_tuple = self.read_nodestate(0)
for item in net_tuple:
discovered_address = item[1]
if address == discovered_address:
return item[0]
return 0 # Socket not found
def lookup_address(self, in_sock, ext_net_tuple=None): # TODO: optimize me
"""Brute force search the network tuple for an address associated with a given socket.
Return a string containing an address if found.
Returns 0 (-> int) if not found
"""
if ext_net_tuple:
net_tuple = ext_net_tuple
else:
net_tuple = self.read_nodestate(0)
for item in net_tuple:
discovered_socket = item[0]
if in_sock == discovered_socket:
return item[1]
return 0 # Address not found
def permute_network_tuple(self):
""" Permute the network tuple. Repetitive permutation after each call
of respond() functionally allows the network to inherit many of the anonymous
aspects of a mixing network. Packets are sent sequentially in the order of the
        network tuple, which, when permuted, thwarts many timing attacks.
Doesn't return """
Primitives.log("Permuting the network tuple", in_log_level="Debug")
net_tuple = self.read_nodestate(0)
net_list = list(net_tuple)
cs_prng = random.SystemRandom()
cs_prng.shuffle(net_list)
        # Tuples are immutable. We have to overwrite the existing one to 'update' it.
net_new_tuple = tuple(net_list)
self.write_nodestate(nodeState, 0, net_new_tuple)
def append(self, in_socket, address):
""" Append a given connection object(tuple of (socket, address)) to the network tuple.
Doesn't return """
net_tuple = self.read_nodestate(0)
# Tuples are immutable; convert it to a list.
net_list = list(net_tuple)
connection = (in_socket, address)
net_list.append(connection)
net_tuple = tuple(net_list)
self.write_nodestate(nodeState, 0, net_tuple)
Primitives.log("Successfully appended connection to Network tuple." +
"\nConnection:" + str(connection) +
"\nNew Network Tuple: " + str(net_tuple), in_log_level="Debug")
def remove(self, connection):
""" Remove a given connection object(tuple of (socket, address)) from the network tuple.
Doesn't return """
# Tuples are immutable; convert it to a list.
net_tuple = self.read_nodestate(0)
net_list = list(net_tuple)
# Identify and remove said connection
try:
index = net_list.index(connection)
net_list.pop(index)
# Connection not in network tuple, or socket is [closed]
except ValueError:
Primitives.log(str("Not removing non-existent connection: " + str(connection)), in_log_level="Warning")
return None
# (Again) tuples are immutable; replace the old one with the new one
net_tuple = tuple(net_list)
self.write_nodestate(nodeState, 0, net_tuple)
Primitives.log("Successfully removed connection from Network tuple." +
"\nConnection:" + str(connection) +
"\nNew Network Tuple: " + str(net_tuple), in_log_level="Debug")
def connect(self, connection, address, port, local=False):
""" Connect to a remote server and handle the connection(i.e append it to network_tuple).
Doesn't return. """
connecting_to_server = self.read_nodestate(2)
sock = connection[0]
# Make a real copy of the network tuple
# Then append our new connection (will be removed if connection fails)
print(self.read_nodestate(0))
net_tuple = tuple(self.read_nodestate(0))
# Don't connect to an address we're already connected to.
if connection in net_tuple or self.lookup_socket(address) != 0:
not_connecting_msg = str("Not connecting to " + connection[1] + " (We're already connected.)")
Primitives.log(not_connecting_msg, in_log_level="Warning")
self.remove((sock, address))
# Do connect to nodes we are not already connected to
else:
# Also don't try to connect to multiple servers at once in the same thread.
if not connecting_to_server:
# connecting_to_server is a mutex which prevents this function
# from making external connections when it's not supposed to.
self.write_nodestate(nodeState, 2, True) # set connecting_to_server = True
if not local:
Primitives.log(str("Connecting to " + address), in_log_level="Info")
sock.settimeout(5)
try:
sock.connect((address, port))
except OSError:
Primitives.log("Unable to connect to "+address+". (OSError)", in_log_level="Warning")
Primitives.log("Successfully connected.", in_log_level="Info")
self.append(sock, address)
self.write_nodestate(nodeState, 2, False) # set connecting_to_server = False
elif local:
self.remove((sock, address))
Primitives.log("Connecting to localhost server...", in_log_level="Info")
try:
sock.connect(("127.0.0.1", port))
self.append(sock, "127.0.0.1")
except OSError:
Primitives.log("Unable to connect to "+address+". (OSError)", in_log_level="Warning")
# The socket object we appended earlier was automatically
# destroyed by the OS because connections to 0.0.0.0 are illegal...
# Connect to localhost with raddr=127.0.0.1...
Primitives.log("Successfully connected to localhost server", in_log_level="Info")
self.write_nodestate(nodeState, 2, False) # set connecting_to_server = False
def disconnect(self, connection, disallow_local_disconnect=True):
""" Try to disconnect from a remote server and remove it from the network tuple.
        Returns None if you do something stupid; otherwise doesn't return """
# 1. Input validation
try:
sock = connection[0]
address_to_disconnect = connection[1]
except TypeError:
Primitives.log("Expected a connection tuple, got:", in_log_level="Warning")
Primitives.log(str('\t') + str(connection), in_log_level="Warning")
return None
# 2. Try to disconnect from said node.
try:
# Don't disconnect from localhost unless told to. That's done with self.terminate().
if disallow_local_disconnect:
if address_to_disconnect == Primitives.get_local_ip() or address_to_disconnect == "127.0.0.1":
Primitives.log("Not disconnecting from localhost dimwit.", in_log_level="Warning")
# Do disconnect from remote nodes. That sometimes makes sense.
else:
verbose_connection_msg = str("Disconnecting from " + address_to_disconnect
+ "\n\t( " + str(sock) + " )")
Primitives.log(verbose_connection_msg, in_log_level="Info")
self.remove(connection)
try:
sock.close()
except (OSError, AttributeError):
close_fail_msg = str("Failed to close the socket of "
+ address_to_disconnect
+ " -> OSError -> disconnect()")
Primitives.log(close_fail_msg, in_log_level="Warning")
finally:
Primitives.log("Successfully disconnected.", in_log_level="Info")
# Either the socket in question doesn't exist, or the socket is probably [closed].
except (IndexError, ValueError):
Primitives.log("Already disconnected from that address, passing...", in_log_level="Warning")
pass
""" The following send() function was written by StackOverflow user
Adam Rosenfield, then modified by me, HexicPyth.
https://stackoverflow.com/a/17668009
https://stackoverflow.com/users/9530/adam-rosenfield """
def send(self, connection, message, sign=True):
"""Helper function to encode a given message and send it to a given server.
Set sign=False to disable automatic message signing(useful for no_prop things)
"""
global send_lock
self.lock(send_lock, name="Send lock")
sock = connection[0]
if sign:
msg = self.prepare(message).encode('utf-8')
else:
msg = message.encode('utf-8')
# Prefix each message with a 4-byte length (network byte order)
msg = struct.pack('>I', len(msg)) + msg
# Attempt to send the message through normal means.
try:
sock.sendall(msg)
# Socket probably disconnected, let's do the same and remove it
# from the network tuple to avoid conflict.
except OSError:
self.disconnect(connection)
self.release(send_lock, name="Send lock")
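    # Minimal receive-side sketch (an assumption for documentation only; the real framing is
    # read by the server module elsewhere). It shows how a peer would unpack the 4-byte
    # big-endian length prefix written by send() above:
    #
    #   def recv_frame(sock):
    #       header = b''
    #       while len(header) < 4:
    #           header += sock.recv(4 - len(header))
    #       (length,) = struct.unpack('>I', header)
    #       payload = b''
    #       while len(payload) < length:
    #           payload += sock.recv(length - len(payload))
    #       return payload.decode('utf-8')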
def broadcast(self, message, do_mesh_propagation=True, in_nodeState=None):
global ring_prop
        # do_mesh_propagation="not set" means use the global config in nodeState[12]
if in_nodeState:
net_tuple = in_nodeState[0]
message_list = in_nodeState[1]
else:
self.permute_network_tuple()
net_tuple = self.read_nodestate(0)
message_list = self.read_nodestate(1)
# If not bootstrapped, do ring network propagation. Else, do fully-complete style propagation.
if do_mesh_propagation == "not set":
if in_nodeState:
do_mesh_propagation = in_nodeState[12]
else:
do_mesh_propagation = self.read_nodestate(12)
Primitives.log("Doing mesh propagation: "+str(do_mesh_propagation), in_log_level="Debug")
# Network not bootstrapped yet, do ring network propagation
if message[:16] != ring_prop:
message = ring_prop + ":" + message
if not do_mesh_propagation:
if in_nodeState:
self.write_nodestate(in_nodeState, 1, message_list)
if do_mesh_propagation:
""" network bootstrapped or do_mesh_propagation override is active, do fully-complete/mesh style
message propagation """
Primitives.log("Message propagation mode: fully-complete/mesh", in_log_level="Debug")
for connection in net_tuple:
self.send(connection, message, sign=False) # Send a message to each node( = Broadcast)
if in_nodeState:
return nodeState
def run_external_command(self, command):
global command_execution_lock
# Given a string containing a UNIX command, execute it.
# Disable this by setting command_execution=False
# Returns 0 -> (int)
self.lock(command_execution_lock, name="command execution")
os.system(command)
self.release(command_execution_lock, name="command execution")
return 0
def write_to_page(self, page_id, data, signing=True, filter_duplicate_data=True):
global fileIO_lock
""" Append data to a given pagefile by ID."""
self.lock(fileIO_lock, name="File I/O")
Primitives.log("Writing to page:" + page_id, in_log_level="Info")
os.chdir(self.read_nodeConfig(6))
# Write page data pseudonymously with ADDR_ID
if signing:
"""ADDR_ID is a cryptographic hash of this node''s externally reachable IP address, salted with a unique
random token generated upon initialization. ADDR_ID is used as an anonymous, common identifier
which external nodes can use to direct messages to anonymous destination nodes without requiring them
to reveal their identity."""
ADDR_ID = self.read_nodeConfig(5)
data_line = str(ADDR_ID + ":" + data + "\n")
# Write data completely anonymously
else:
data_line = str(data + "\n")
file_path = ("../inter/mem/" + page_id + ".bin")
print('Writing ' + data + " to " + page_id + ".bin")
this_page = open(file_path, "a+")
this_page.write(data_line)
this_page.close()
if filter_duplicate_data:
# Remove duplicate data
unique_lines = set(open(file_path).readlines())
open(file_path, 'w').writelines(set(unique_lines))
self.release(fileIO_lock, name="File I/O")
def respond(self, connection, msg):
""" We received a message, reply with an appropriate response.
Doesn't return. """
global nodeState
global ring_prop
self.lock(respond_lock, name="Respond lock")
full_message = str(msg)
message = full_message[17:] # Message without signature
sig = full_message[:16] # Just the signature
address = connection[1]
net_tuple = self.read_nodestate(0)
message_list = self.read_nodestate(1)
propagation_allowed = True
original_path = self.read_nodeConfig(6)
os.chdir(original_path)
if address == "127.0.0.1":
Primitives.log("Received message from 127.0.0.1; This is a violation of protocol; "
"replacing address with Local IP.", in_log_level="Debug")
# Replying to localhost is strictly disallowed. Replace localhost with actual local IP.
address = Primitives.get_local_ip()
        # Introduce additional mix-network-style anonymity with a random delay; this makes it difficult or
        # impossible to derive message paths through timing analysis alone.
sleep(random.uniform(0.012, 0.08)) # 12mS - 80mS
if sig == ring_prop:
"""Sending messages in ring mode adds a special signature on top of the signed message, so to get
the actual signature(not the ring propagation delimiter) we need to remove the delimiter, then
process the message as usual."""
message = full_message[17:] # Remove the ring propagation delimiter
message_sig = message[:16] # Get the actual message signature
sig = message_sig # Make the signature local variable point to the actual message signature, not ring_prop
message = message[17:] # Remove the message signature from the message to reveal just the payload
new_message_list = list(message_list)
new_message_list.append(message_sig)
self.write_nodestate(nodeState, 1, new_message_list)
"""Axonet stores the signatures of all received messages in a global lookup table. Messages are propagated in
a way which (inevitably) leads to most nodes receiving identical messages from many independent sending nodes.
Nodes only need to respond to each message once, so the message signatures are stored in a global lookup table
(message_list = nodeState[1]).
Depending on the network configuration/architecture, nodes will either refuse
to send messages with signatures that appear in the message_list(ring propagation), or refuse to respond to
messages with signatures appearing in the message_list(mesh/fully-complete message propagation)"""
if sig in message_list:
not_responding_to_msg = str("Not responding to " + sig)
Primitives.log(not_responding_to_msg, in_log_level="Debug")
try:
            # This message is either unique, or it has been sent with a special signature indicating that
# it should not be propagated(no_prop).
if sig not in message_list or sig == no_prop:
# Append message signature to the message list, or in the case of sig=no_prop, do nothing.
if sig != no_prop and propagation_allowed:
new_message_list = list(message_list)
new_message_list.append(sig)
self.write_nodestate(nodeState, 1, new_message_list)
# Propagate the message to the rest of the network.
Primitives.log(str('Broadcasting: ' + full_message), in_log_level="Debug")
propagation_mode = self.read_nodestate(12)
self.broadcast(full_message, do_mesh_propagation=propagation_mode)
# Don't spam stdout with hundreds of kilobytes of text during pagefile syncing/file transfer
if len(message) < 100 and "\n" not in message:
message_received_log = str('Received: ' + message
+ " (" + sig + ")" + " from: " + address)
# e.x "Client -> [log level]: Received: echo (0123456789abcdef) from: 127.0.0.1"
else:
message_received_log = str('Received: ' + message[:16] + "(message truncated)"
+ " (" + sig + ")" + " from: " + address)
Primitives.log(message_received_log, in_log_level="Info")
# If received, send back to confirm the presence of successful two-way communication
if message == "echo":
import echo
""" Simple way to test our connection to a given node."""
Primitives.log("echoing...", in_log_level="Info")
echo.initiate(self.read_nodestate(0), self.read_nodeConfig(8), connection, no_prop)
# Terminate this node and quit
if message == "stop":
""" instruct all nodes to disconnect from each other and exit cleanly."""
# Enable fully-complete/mesh propagation, regardless of actual network architecture,
# to peer pressure isolated/edge nodes into dying on command
# Inform localhost to follow suit.
localhost_connection = (self.read_nodeConfig(11), "127.0.0.1")
self.send(localhost_connection, "stop")
# The node will already be terminated by the time it gets to the end of the function and runs the
# message propagation algorithm; broadcast now, then stop
self.broadcast(full_message, do_mesh_propagation=True)
# Do so ourselves
self.terminate()
# Set various network topology attributes on-the-fly
if message.startswith('config:'):
arguments = Primitives.parse_cmd(message) # arguments[0] = variable to configure; [1] = value
print(str(arguments))
import config_client
os.chdir(this_dir)
config_client.config_argument(arguments, self.read_nodeConfig(3), self.read_nodeConfig(2))
# Instruct clients to connect to remote servers.
if message.startswith("ConnectTo:"):
"""ConnectTo: Instructs external clients to connect to remote servers.
In fully-connected mode, ConnectTo: is sent by each node being connected to when a new node
                joins the network, with one ConnectTo: flag per node in their network table, instructing the new node
to connect to [each node in their network table]. As long as all nodes respond to ConnectTo: flags,
(if network_architecture = "complete" in init_client/init_server) the
network will always be fully-connected.
Elsewhere in the documentation and code, this bootstrapping mechanism is
referred to as "address propagation"
"""
# remove the 'ConnectTo:' flag from the message, leaving only the external address to connect to.
connect_to_address = message[10:]
# lookup_socket will return 0 if we're not already connected to said address (above)
connection_status = self.lookup_socket(connect_to_address)
Primitives.log(str(net_tuple), in_log_level="Debug")
# If we're not already connected and making this connection won't break anything, connect now.
if connection_status == 0:
remote_address_is_localhost = connect_to_address == Primitives.get_local_ip() or \
connect_to_address == "127.0.0.1"
# Don't connect to localhost multiple times;
# All kinds of bad things happen if you do.
if remote_address_is_localhost:
not_connecting_msg = str("Not connecting to " + connect_to_address + "; That's localhost :P")
Primitives.log(not_connecting_msg, in_log_level="Warning")
else:
network_architecture = self.read_nodeConfig(8)
mesh_network = (network_architecture == "mesh") # True if network architecture is mesh
sent_by_localhost = (address == "127.0.0.1" or address == Primitives.get_local_ip())
print('\n\n')
print("\tNetwork Architecture: " + network_architecture)
print("\tNetwork Architecture is mesh: " + str(mesh_network))
print("\tRemote Address is Localhost: " + str(remote_adress_is_localhost))
print("\tReceived packet from Localhost: " + str(sent_by_localhost))
print("\n\n")
""" In a fully-connected network, act on all ConnectTo: packets;
In a mesh network, only act on ConnectTo: packets originating from localhost
(ConnectTo: is never sent with message propagation -- ConnectTo: packets received from
localhost always really originate from localhost) """
if (mesh_network and sent_by_localhost) or not mesh_network:
local_address = Primitives.get_local_ip()
# Be verbose
Primitives.log(str("self.lookup_socket() indicates that we're not"
" connected to " + connect_to_address), in_log_level="Info")
Primitives.log(str("Primitives.get_local_ip() indicates that"
" localhost = " + local_address), in_log_level="Info")
new_socket = socket.socket()
new_connection = (new_socket, connect_to_address)
# If we're not connected to said node
if not connection_status:
try:
PORT = self.read_nodeConfig(0)
self.connect(new_connection, connect_to_address, PORT)
# Connection was successful, cache address to hosts file and start listening...
self.write_to_page('hosts', connect_to_address, False)
self.listen(new_connection)
except OSError:
""" Most Likely a Bad Fie Descriptor in self.connect().
I don't know what to do about that, so we'll just warn the user."""
Primitives.log(str("Unable to connect to: " + str(connect_to_address)),
in_log_level="Warning")
# Don't connect to an address we're already connected to...
elif connection_status != 0:
already_connected_msg = str("Not connecting to " +
connect_to_address +
";" +
"We're already connected.")
Primitives.log(already_connected_msg, "Warning")
# If allowed by client configuration, execute a shell command in the operating system's default terminal
if message.startswith('exec:'):
import exec
exec.initiate(message, self.read_nodeConfig(1))
# Create a new pagefile in src/inter/mem which will presumably store some data generated by a
# concurrent network algorithm
if message.startswith("newpage:"):
""" Create a new pagefile that we'll presumably do some
parallel or distributed operations with.
e.x newpage:(64-bit identifier provided by sender)"""
page_id = message[8:]
new_filename = str("../inter/mem/" + page_id + ".bin")
Primitives.log("Creating new page with id: " + str(page_id), in_log_level="Info")
os.chdir(original_path)
newpage = open(new_filename, "a+")
page_list = self.read_nodestate(6)
page_list.append(newpage)
self.write_nodestate(nodeState, 6, page_list)
# Retrieve a file from distributed memory by instructing all nodes to sync the contents of some pagefile
if message.startswith("fetch:"):
# fetch:pagefile:[optional task identifier]
""" Broadcast the contents of [page id] to maintain distributed memory """
arguments = Primitives.parse_cmd(message)
page_id = arguments[0]
# Read contents of page
os.chdir(original_path)
pagefile = open("../inter/mem/" + page_id + ".bin", "r+")
page_lines = pagefile.readlines()
pagefile.close()
# Don't sync comments; filter into a new list instead of removing items
# from page_lines while iterating over it (which skips adjacent entries).
page_lines = [line for line in page_lines if not line.startswith("#")]
page_contents = ''.join(set(list(page_lines)))
print("Page contents:")
try:
election_list = self.read_nodestate(9)
module_loaded = self.read_nodestate(5)
if arguments[1] == "discovery" and module_loaded == "discovery":
network_size = self.read_nodeConfig(7)
is_cluster_rep = (Primitives.find_representative(election_list, "discovery-" + page_id)
== Primitives.get_local_ip())
print("(fetch) page lines: " + str(len(page_lines)))
print("(fetch) network size: " + str(network_size))
if is_cluster_rep and network_size > len(page_lines):
print("(fetch) syncing " + page_id + ".bin" + "...")
sync_msg = self.prepare("sync:" + page_id + ":" + page_contents, salt=False)
out_sig = sync_msg[:16]
if out_sig not in message_list:
self.broadcast(sync_msg, do_mesh_propagation=False)
else:
print("(fetch) not syncing " + page_id + ".bin" + "..." + "; All contributions"
" have been written...")
self.write_nodestate(nodeState, 5, "")  # unload 'discovery'
# Else if arguments[1] doesn't exist queue a normal fetch: routine
except TypeError:
sync_msg = self.prepare("sync:" + page_id + ":" + page_contents)
self.broadcast(sync_msg, do_mesh_propagation=True)
# Write received pagefile data to disk, and process received data
if message.startswith("sync:"):
""" Update our pagefile with data from another node (such as another node's completed work)
Translation: write arbitrary data to page [page id]
Syntax: sync:(page id):(data)
"""
os.chdir(original_path)
page_id = message[5:][:16] # First 16 bytes after removing the 'sync:' flag
sync_data = message[22:]
print("Message: ")
print("\n\nSync Data: " + sync_data)
Primitives.log("Syncing " + sync_data + " into page:" + page_id, in_log_level="Debug")
file_path = "../inter/mem/" + page_id + ".bin"
file_exists = False
try:
raw_lines = open(file_path, "r+").readlines()
# Don't include comments
valid_pagelines = [raw_line for raw_line in raw_lines
if raw_line != "\n" and raw_line[:2] != "##"]
line_count = len(valid_pagelines)
file_exists = True
except FileNotFoundError:
Primitives.log("Cannot open a non-existent page")
valid_pagelines = [] # Stop PyCharm from telling me this is referenced before assignment
line_count = 0
if file_exists:
duplicate = False
local = False
network_size = self.read_nodeConfig(7)
Primitives.log("Receiving " + str(len(sync_data)) + " bytes of data from network",
in_log_level="Info")
for line in valid_pagelines:
if self.read_nodeConfig(2) == "Debug":
print("Line: " + line)
print('Data: ' + sync_data)
if line == sync_data:
duplicate = True
Primitives.log("Not writing duplicate data into page " + page_id)
break
if not duplicate:
data_id = sync_data[:16]
local_id = sha3_224(Primitives.get_local_ip().encode()).hexdigest()[:16]
if data_id == local_id:
# Don't re-write data from ourselves. We already did that with 'corecount'.
print("Not being hypocritical in page " + page_id)
local = True
if not local:
if sync_data == "" or sync_data == " " or sync_data == "\n":
pass
else:
if self.read_nodeConfig(2) == "Debug":
module_loaded = self.read_nodestate(5)
do_write = False
if module_loaded == "discovery":
if line_count < network_size:
do_write = True
else:
do_write = True
if do_write:
print("Writing " + sync_data + "to page " + page_id)
self.write_to_page(page_id, sync_data, signing=False)
else:
Primitives.log("Writing " + str(len(sync_data)) + " bytes to " + page_id + ".bin",
in_log_level="Info")
# https://stackoverflow.com/a/1216544
# https://stackoverflow.com/users/146442/marcell
# The following two lines of code were written by "Marcell" on StackOverflow.
# Remove duplicate lines from page
unique_lines = set(open(file_path).readlines())
open(file_path, 'w').writelines(set(unique_lines))
# Remove any extra newlines from page
raw_lines = list(set(open(file_path).readlines()))
existing_lines = list(set(
[raw_line for raw_line in raw_lines
if raw_line != "\n" and raw_line[:2] != "##"]))
# Write changes to page
open(file_path, 'w').writelines(set(existing_lines))
# Wait for each node to contribute before doing module-specific I/O
Primitives.log("\n\t" + str(len(existing_lines)) + " Node(s) have contributed to the network."
"\n The network tuple(+1) is of length: "
+ str(len(net_tuple) + 1), in_log_level="Debug")
if len(existing_lines) >= network_size:
module_loaded = ""
self.write_nodestate(nodeState, 5, module_loaded)
# We've received contributions from every node on the network.
# Now do module-specific I/O
else:
module_loaded = self.read_nodestate(5)
election_list = self.read_nodestate(9)
is_cluster_rep = self.read_nodestate(11)
print("sync: module loaded: " + module_loaded)
if module_loaded == "discovery":
# TODO: Make this support multiple peer discoveries without reinitializing
hosts_pagefile = ''.join(
[item[0][10:] for item in election_list if item[0][:10] == "discovery-"])
print("(sync)Existing lines: " + str(len(existing_lines)))
print('(sync)Network size: ' + str(network_size))
print("(sync)Lines: " + str(existing_lines))
if is_cluster_rep and network_size > len(existing_lines):
print("(sync)Not done...")
print("(sync) fetching " + page_id + ".bin" + "...")
self.broadcast(self.prepare("fetch:" + hosts_pagefile + ":discovery"),
do_mesh_propagation=False)
elif len(existing_lines) >= network_size:
print(
"(sync) not fetching: " + page_id + ".bin" + '; All contributions have been written')
if message.startswith("find:") or message.startswith("reset:"):
import finder
import readPartNumbers
os.chdir(this_dir)
line_number_list = []
local_ip = Primitives.get_local_ip()
directory_server = self.read_nodeConfig(10)
our_parts = readPartNumbers.find_my_parts(local_ip, directory_server, path_to_client=this_dir)
for item in our_parts:
print(item)
line_num = item[2]
line_number_list.append(line_num)
print(line_num)
sub_node = self.read_nodeConfig(3)
log_level = self.read_nodeConfig(2)
finder.respond_start(message, sub_node, log_level, line_number_list=line_number_list)
# Provide servers a means of communicating readiness to clients. This is used during file proxying
# to form a feedback loop between the proxy and client, so that the client never exceeds the
# maximum channel capacity (i.e. bandwidth) of its connection to the proxy server.
if message.startswith("notify:"):
arguments = Primitives.parse_cmd(message)
if arguments[0] == "something":
pass # Do something about it
# Disconnect some misbehaving node and pop it from network tuple
if message.startswith("remove:"):
address_to_remove = message[7:]
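# Hedged example: "remove:192.168.1.42" (address hypothetical) looks up the
# socket for that peer, disconnects it, and forwards the same instruction to
# localhost below so the server drops the peer as well.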
try:
# Disconnect from remote node.
# Don't disconnect from localhost. That's what self.terminate is for.
if address_to_remove != Primitives.get_local_ip() and address_to_remove != "127.0.0.1":
# Lookup the socket of the address to remove
sock = self.lookup_socket(address_to_remove)
if sock:
Primitives.log("Remove -> Disconnecting from " + address_to_remove,
in_log_level="Info")
connection_to_remove = (sock, address_to_remove)
Primitives.log(str("\tWho's connection is: " + str(connection_to_remove)),
in_log_level="Debug")
self.disconnect(connection_to_remove)
else:
Primitives.log("Not disconnecting from a non-existent connection",
in_log_level="Warning")
else:
Primitives.log("Not disconnecting from localhost, dimwit.", in_log_level="Warning")
except (ValueError, TypeError):
# Either the address we're looking for doesn't exist, or we're not connected to it.
Primitives.log(str("Sorry, we're not connected to " + address_to_remove),
in_log_level="Warning")
# Localhost needs to remove said node too!
localhost_conn = (self.read_nodeConfig(11), "127.0.0.1")
self.send(localhost_conn, no_prop + ":" + message, sign=False)
# Start a network election which selects a suitable node to do some task
if message.startswith("vote:"):
import vote
arguments = Primitives.parse_cmd(message)
reason = arguments[0]
self.write_nodestate(nodeState, 10, True)
# Instead of making global changes to the nodeState, pass a new nodeState to vote
# with the appropriate parameters changed...
new_nodestate = vote.respond_start(reason, nodeState)
self.overwrite_nodestate(new_nodestate)
# Participate in a network election by entering as a candidate
if message.startswith("campaign:"):
# example message: campaign:do_stuff:01234566789:192.168.53.60
import vote
election_details = Primitives.parse_cmd(message) # ("reason", "representative")
reason = election_details[0]
election_list = self.read_nodestate(9)
election_tuple_index = Primitives.find_election_index(election_list, reason)
print(election_tuple_index)
# If this node hasn't yet initialized its election_list for (reason, "TBD") or (reason, representative)
if election_tuple_index == -1:
self.write_nodestate(nodeState, 10, True)
vote.respond_start(reason, nodeState)
Primitives.log("Received a campaign: flag out of order(i.e before the vote: flag)."
"Attempting to initiate our election protocol with any information we"
"can collect.", in_log_level="Warning")
# This node has initialized its election_list, so do the actual campaign work...
# If election_list[election_tuple_index] is not -1 or "TBD" then that election has already completed
# so we don't want to disrupt it by continuing to campaign after-the-fact...
elif election_list[election_tuple_index][1] == "TBD":
campaign_tuple = tuple(election_details)
campaign_list = self.read_nodestate(8)
campaign_list.append(campaign_tuple)
# Extract just the campaigns for the task at hand from the campaign_list.
# (The campaign_list contains contributions for all current and previous tasks)
this_campaign_list = [item for item in campaign_list if item[0].startswith(reason)]
this_campaign_list = list(set(this_campaign_list)) # Remove any duplicate entries
Primitives.log(str(len(this_campaign_list)) + " nodes have cast votes for "+election_details[0])
Primitives.log("Network size: "+str(self.read_nodeConfig(7)))
# If all votes are cast, elect a leader.
network_size = self.read_nodeConfig(7)
if len(this_campaign_list) == network_size:
# The node with the greatest campaign token is elected cluster representative.
campaign_tokens = [campaign_tuple[1] for campaign_tuple in campaign_list
if campaign_tuple[0] == reason]
winning_token = max(campaign_tokens)
winning_reason = ""
winning_candidate = ""
for campaign_tuple in campaign_list:
if campaign_tuple[1] == winning_token:
winning_reason = campaign_tuple[0]
winning_candidate = campaign_tuple[2]
election_log_msg = str(winning_token) + " won the election for: " + winning_reason
Primitives.log(election_log_msg, in_log_level="Info")
this_campaign = self.read_nodestate(7) # TODO: this could cause or suffer from race conditions
Primitives.log(winning_candidate + " won the election for: " + winning_reason,
in_log_level="Info")
elect_msg = self.prepare("elect:" + winning_reason + ":" + winning_candidate, salt=False)
self.broadcast(elect_msg, do_mesh_propagation=True)
self.write_nodestate(nodeState, 11, True) # set is_cluster_rep = True
self.write_nodestate(nodeState, 11, False) # set is_cluster_rep = False
# Cleanup
self.write_nodestate(nodeState, 7, 0) # reset this_campaign to 0
self.write_nodestate(nodeState, 10, False) # clear ongoing_election
# Elect the winning node of a network election to their position as cluster representative
if message.startswith("elect:"):
# elect:reason:representative
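# Hedged example: "elect:discovery-0123456789abcdef:192.168.1.42" (values
# hypothetical) records 192.168.1.42 as the representative for that election
# in the election list below.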
# Parse arguments
args = Primitives.parse_cmd(message)
reason = args[0]
new_leader = args[1]
# Index of tuple containing winning node
election_list = self.read_nodestate(9)
index = Primitives.find_election_index(election_list, reason)
new_election_list = Primitives.set_leader(election_list, index, new_leader)
self.write_nodestate(nodeState, 9, new_election_list) # Update the election list
print("New election list: " + str(new_election_list))
election_winner_msg = str(new_leader) + " won the election for: " + reason
Primitives.log(election_winner_msg, in_log_level="Info")
if reason.startswith('discovery-'):
os.chdir(original_path)
import discover
op_id = reason[10:]
# Remove any previous discovery elections from the election list.
# This allows network bootstrapping to occur multiple times without reinitializing
# Iterate over a copy so removing items doesn't skip entries mid-iteration
for _election_tuple in list(new_election_list):
_reason = _election_tuple[0]
if _reason != reason:
new_election_list.remove(_election_tuple)
self.write_nodestate(nodeState, 9, new_election_list)
self.write_nodestate(nodeState, 10, False) # Set ongoing_election = False
is_cluster_rep = (new_leader == Primitives.get_local_ip())
print("is_cluster_rep: "+str(is_cluster_rep))
Primitives.log(str(new_election_list), in_log_level="Debug")
if is_cluster_rep:
new_nodestate = discover.respond_start(nodeState, op_id, is_cluster_rep)
self.overwrite_nodestate(new_nodestate)
# Write the remote addresses of all connected nodes to the pagefile established by $discover
if message.startswith("sharepeers:"):
# sharepeers:pagefile
os.chdir(original_path)
import discover
import sharepeers
new_nodestate, op_id, is_cluster_rep = sharepeers.respond_start(message, nodeState)
self.overwrite_nodestate(new_nodestate)
print("Is cluster rep: " + str(is_cluster_rep))
discover.start(net_tuple, op_id, is_cluster_rep)
# Ring Network --> Mesh network bootstrapping routine
if message.startswith("bootstrap:"):
directory_server = self.read_nodeConfig(10)
arguments = Primitives.parse_cmd(message)
# arguments[0] = network architecture to bootstrap into (e.x "mesh")
# arguments[1] = c_ext
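# Hedged example: "bootstrap:mesh:2" (values hypothetical) would disconnect
# from everything except localhost and then connect to 2 peers chosen at
# random from the directory server's hosts file (or the cached copy).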
election_list = self.read_nodestate(9)
net_architecture = arguments[0]
c_ext = int(arguments[1])
try:
print("Trying to download hosts...")
directory_server_hostsfile_contents = Primitives.download_file(directory_server + "hosts.bin")
directory_server_hosts = directory_server_hostsfile_contents.split('\n')
potential_peers = [line for line in directory_server_hosts
if line not in ("", '', "\n")]
print(potential_peers)
# Cache these hosts so we can use them again if the directory server becomes inaccessible
self.write_to_page('hosts', directory_server_hostsfile_contents, False)
except AttributeError:
# download_file returned an integer(1) because the directory server is not reachable
Primitives.log("Directory server not reachable... using cached hosts...")
try:
os.chdir(original_path)
hosts_lines = open("../inter/mem/hosts.bin", "r+").readlines()
potential_peers = [host_entry for host_entry in hosts_lines if host_entry != "\n"]
if len(potential_peers) == 0:
raise FileNotFoundError("No potential peers found; hosts.bin empty")
except FileNotFoundError:
# This is bad: no directory server and no cached hosts to fall back on.
Primitives.log("No cached hosts found, refusing to bootstrap!")
potential_peers = 1
chosen_peers = []
if potential_peers and potential_peers != 1:
# Do not try to pick ourselves as a remote node; filter instead of removing
# items from potential_peers while iterating over it
potential_peers = [peer for peer in potential_peers
if peer != Primitives.get_local_ip() + "\n"]
if potential_peers != 1:
if net_architecture == "mesh":
print("Network tuple:")
print(str(net_tuple))
this_node = (self.read_nodeConfig(11), "127.0.0.1")
# Disconnect from everything other than localhost
for peer in net_tuple:
if peer != this_node:
self.disconnect(peer)
net_tuple = self.read_nodestate(0) # Refresh the network tuple after disconnecting
else:
pass # Don't disconnect from localhost
# Select remote peers to bootstrap with
for i in range(0, c_ext):
try:
chosen_peer = random.choice(potential_peers)
potential_peers.remove(chosen_peer)
chosen_peers.append(chosen_peer.strip("\n"))
except IndexError:
break
Primitives.log("Disassociation successful. Ready for bootstrap...", in_log_level="Info")
# Bootstrap!
for peer_address in chosen_peers:
external_connection = (socket.socket(), peer_address)
self.connect(external_connection, peer_address, self.read_nodeConfig(0))
# Great, bootstrapping was successful
# Set global message propagation mode to mesh
# This was probably already run by sharepeers: assuming peer discovery was run...
do_mesh_propagation = self.read_nodestate(12)
if not do_mesh_propagation:
do_mesh_propagation = True
self.write_nodestate(nodeState, 12, do_mesh_propagation)
# Catch all errors in respond() and log the traceback to stdout. This keeps the client from crashing due to
# random errors which may occur in other modules that may or may not have proper exception handling
except Exception:
traceback.print_exc()
self.release(respond_lock, name="Respond lock")
def listen(self, connection):
# Listen for incoming messages and call self.respond() to respond to them.
# Also, deal with disconnections as they are most likely to throw errors here.
# Returns nothing.
global receive_lock
def listener_thread(conn):
global receive_lock
in_sock = conn[0]
terminated = self.read_nodestate(3)
listener_terminated = False # Terminate when set
while not listener_terminated and not terminated:
incoming = Primitives.receive(conn)
raw_message = incoming
try:
if incoming:
self.respond(conn, raw_message)
except TypeError:
conn_severed_msg = str("Connection to " + str(in_sock)
+ "was severed or disconnected."
+ "(TypeError: listen() -> listener_thread()")
Primitives.log(conn_severed_msg, in_log_level="Warning")
self.disconnect(conn)
listener_terminated = True
if incoming == 1:
self.disconnect(conn)
conn_not_existent_msg = str("Connection to " + str(in_sock) +
"doesn't exist, terminating listener_thread()")
Primitives.log(conn_not_existent_msg, in_log_level="Warning")
listener_terminated = True
# Start listener in a new thread
threading.Thread(target=listener_thread, args=(connection,), name='listener_thread').start()
def terminate(self):
# Disconnect from the network and exit the client cleanly.
# Does not return; the process exits via os._exit(0).
net_tuple = self.read_nodestate(0)
page_list = self.read_nodestate(6)
Primitives.log("Safely terminating our connections...", in_log_level="Warning")
index = 0
for file in page_list:
Primitives.log("Closing pages..", in_log_level="Info")
file.close()
try:
os.remove(file.name)
except FileNotFoundError:
Primitives.log("Not removing non-existent page")
Primitives.log(str("Terminating connection to "), in_log_level="Info")
for connection in net_tuple:
address = connection[1]
Primitives.log(str("Terminating connection to " + address), in_log_level="Info")
self.disconnect(connection, disallow_local_disconnect=False)
_localhost = self.read_nodeConfig(11)
_localhost.close()
index += 1
Primitives.log("Quietly Dying...")
self.write_nodestate(nodeState, 3, True) # Set terminated = True
# noinspection PyProtectedMember
# kill oneself and all children (threads) with so much passion that
# the python devs made this method private.
os._exit(0)
def initialize(self, port=3705, net_architecture="complete", remote_addresses=None, command_execution=False,
default_log_level="Debug", modules=None, net_size=0, input_directory_server=""):
# Initialize the client, setup nodeConfig, bootstrap...
global nodeConfig
global Primitives
SALT = secrets.token_hex(16) # Generate SALT
# nodeConfig assignments
self.write_nodeConfig(nodeConfig, 0, port)
self.write_nodeConfig(nodeConfig, 1, command_execution)
self.write_nodeConfig(nodeConfig, 2, default_log_level)
# nodeConfig[3] isn't user configurable
Primitives = primitives.Primitives(self.read_nodeConfig(3), self.read_nodeConfig(2))
self.write_nodeConfig(nodeConfig, 4, SALT)
self.write_nodeConfig(nodeConfig, 5, Primitives.gen_addr_id(SALT)) # Generate ADDR_ID
# nodeConfig[6] isn't user configurable
self.write_nodeConfig(nodeConfig, 7, net_size)
self.write_nodeConfig(nodeConfig, 8, net_architecture)
self.write_nodeConfig(nodeConfig, 9, None) # We'll reset this shortly if needed
self.write_nodeConfig(nodeConfig, 10, input_directory_server)
# nodeConfig[11] is magic; don't touch
if remote_addresses:
output_node = random.choice(remote_addresses)
self.write_nodeConfig(nodeConfig, 9, output_node)
new_loaded_modules = []
# Import loaded modules
for item in modules:
import_str = "import " + item
new_loaded_modules.append(item)
exec(import_str)
self.write_nodestate(nodeState, 4, new_loaded_modules)
# Stage 0
Primitives.log("Initializing...", in_log_level="Info")
localhost_connection = (self.read_nodeConfig(11), '127.0.0.1')
try:
self.connect(localhost_connection, 'localhost', port, local=True)
Primitives.log("Connection to localhost successful", in_log_level="Info")
Primitives.log("Starting listener on localhost...", in_log_level="Info")
self.listen(localhost_connection)
except ConnectionRefusedError:
Primitives.log("Connection to localhost unsuccessful; check that your server is "
"initialized, and try again later.", in_log_level="Warning")
quit(1)
except FileNotFoundError:
pass
Primitives.log("Attempting to connect to remote server(s)... (Initiating stage 1)",
in_log_level="Info")
# Stage 1
if remote_addresses:
for remote_address in remote_addresses:
# Join the network if one already exists...
sock = socket.socket()
try:
connection = (sock, remote_address)
self.connect(connection, remote_address, port)
Primitives.log(str("Starting listener on " + remote_address), in_log_level="")
self.listen(connection)
if net_architecture == "complete":
self.send(connection, no_prop + ":echo", sign=False)
except ConnectionRefusedError:
Primitives.log("Unable to connect to remove server; Failed to bootstrap.",
in_log_level="Warning")
else:
Primitives.log("Initializing with no remote connections...", in_log_level="Info")
|
RecoderRobotData.py
|
# MIT License.
# Copyright (c) 2020 by BioicDL. All rights reserved.
# Created by LiuXb on 2020/11/24
# -*- coding:utf-8 -*-
"""
@Modified:
@Description:
"""
import threading
import time
import queue
from deepclaw.driver.arms.URController_rtde import URController
import pickle
# receive
class GetRobotData(object):
def __init__(self):
self.flag = True
def stop(self):
self.flag = False
# push data to buffer
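# Hedged note: run() below samples the controller roughly every 10 ms
# (time.sleep(0.01)), stamps each state dict with the wall-clock time, and
# pushes it onto the shared queue for the writer thread to pickle.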
def run(self, robot: URController, data_buffer: queue.Queue):
while self.flag:
status = robot.get_state()
time.sleep(0.01)
time_stamp = time.time()
status.update({'time': time_stamp})
data_buffer.put(status)
# print(data_buffer.get())
# write
class SaveRobotData(object):
def __init__(self):
self.flag = True
def stop(self):
self.flag = False
def run(self, data_buffer: queue.Queue, filename: str):
while self.flag:
time.sleep(0.01)
if data_buffer.empty():
continue
else:
dd = data_buffer.get()
with open(filename, "ab") as f:
pickle.dump(dd, f)
class MoveRobot(object):
def __init__(self):
self.flag = True
self.action = None
self.joint = None
def stop(self):
self.flag = False
def set_joints(self, joint):
self.joint = joint
def run(self, robot: URController, data_buffer: queue.Queue = queue.Queue(maxsize=5000)):
# get data
gd = GetRobotData()
read_thread = threading.Thread(target=gd.run, args=(robot, data_buffer,), daemon=True)
read_thread.start()
srd = SaveRobotData()
write_thread = threading.Thread(target=srd.run, args=(data_buffer, 'test.result'), daemon=True)
write_thread.start()
# robot move
robot.move_j(self.joint, 2.8, 2.2)
gd.stop()
srd.stop()
if __name__ == '__main__':
rb = URController('../../configs/robcell-ur5-rg6-d435/ur5.yaml')
print('Start move!')
joints_pos = [-1.41319307, -1.51162964, -1.66329875, -1.50447379, 1.53746051, 0.14490873]
db = queue.Queue(maxsize=0)
x = MoveRobot()
x.set_joints(joints_pos)
x.run(rb, db)
# state = robot.get_state()
# print(state)
rb.go_home()
print('reach home pose')
# for i in range(10):
# status = robot.get_state()
# time_stamp = time.time()
# status.update({'time': time_stamp})
# print(status)
# time.sleep(0.5)
# with open("dict", "ab") as f:
# pickle.dump(status, f)
#
print('============================================')
with open("test.result", 'rb') as f:
while True:
try:
aa = pickle.load(f)
print(aa)
except EOFError:
break
|
spatialquerytests.py
|
import logger
import time
import unittest
import threading
from threading import Thread
from membase.helper.rebalance_helper import RebalanceHelper
from couchbase_helper.cluster import Cluster
from basetestcase import BaseTestCase
from remote.remote_util import RemoteMachineShellConnection
import json
import sys
from membase.helper.spatial_helper import SpatialHelper
class SpatialQueryTests(BaseTestCase):
def setUp(self):
self.helper = SpatialHelper(self, "default")
super(SpatialQueryTests, self).setUp()
self.log = logger.Logger.get_logger()
self.helper.setup_cluster()
self.cluster = Cluster()
self.servers = self.helper.servers
def tearDown(self):
super(SpatialQueryTests, self).tearDown()
def test_simple_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init(data_set)
def test_simple_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make skip (and limit) queries on a "
"simple dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._query_test_init(data_set)
def test_simple_dataset_bbox_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make bounding box queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_bbox_queries()
self._query_test_init(data_set)
def test_simple_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make range queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._query_test_init(data_set)
def test_multidim_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make limit queries on a multidimensional "
"dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init(data_set)
def test_multidim_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make skip (and limit) queries on a "
"multidimensional dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._query_test_init(data_set)
def test_multidim_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make range queries on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._query_test_init(data_set)
def test_multidim_dataset_range_and_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Make range queries with limits on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_and_limit_queries()
self._query_test_init(data_set)
## Rebalance In
def test_rebalance_in_simple_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._rebalance_cluster(data_set)
def test_rebalance_in_simple_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and skip (and limit) queries on a "
"simple dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._rebalance_cluster(data_set)
def test_rebalance_in_simple_dataset_bbox_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and bounding box queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_bbox_queries()
self._rebalance_cluster(data_set)
def test_rebalance_in_simple_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and range queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._rebalance_cluster(data_set)
def test_rebalance_in_multidim_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and limit queries on a multidimensional "
"dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._rebalance_cluster(data_set)
def test_rebalance_in_multidim_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and skip (and limit) queries on a "
"multidimensional dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._rebalance_cluster(data_set)
def test_rebalance_in_multidim_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and range queries on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._rebalance_cluster(data_set)
def test_rebalance_in_multidim_dataset_range_and_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance In and range queries with limits on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_and_limit_queries()
self._rebalance_cluster(data_set)
#Rebalance Out
def test_rebalance_out_simple_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_simple_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and skip (and limit) queries on a "
"simple dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_simple_dataset_bbox_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and bounding box queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_bbox_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_simple_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and range queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_multidim_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and limit queries on a multidimensional "
"dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_multidim_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and skip (and limit) queries on a "
"multidimensional dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_multidim_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and range queries on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._rebalance_cluster(data_set)
def test_rebalance_out_multidim_dataset_range_and_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and range queries with limits on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_and_limit_queries()
self._rebalance_cluster(data_set)
# Warmup Tests
def test_warmup_simple_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with skip and limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init_integration(data_set)
def test_warmup_simple_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with skip (and limit) queries on a "
"simple dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._query_test_init_integration(data_set)
def test_warmup_simple_dataset_bbox_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with bounding box queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_bbox_queries()
self._query_test_init_integration(data_set)
def test_warmup_simple_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with range queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._query_test_init_integration(data_set)
def test_warmup_multidim_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with limit queries on a multidimensional "
"dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init_integration(data_set)
def test_warmup_multidim_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with skip (and limit) queries on a "
"multidimensional dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._query_test_init_integration(data_set)
def test_warmup_multidim_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with range queries on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._query_test_init_integration(data_set)
def test_warmup_multidim_dataset_range_and_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Warmup with range queries with limits on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_and_limit_queries()
self._query_test_init_integration(data_set)
# Reboot Tests
def test_reboot_simple_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot and limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init_integration(data_set)
def test_reboot_simple_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot with skip (and limit) queries on a "
"simple dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._query_test_init_integration(data_set)
def test_reboot_simple_dataset_bbox_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot with bounding box queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_bbox_queries()
self._query_test_init_integration(data_set)
def test_reboot_simple_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot with range queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._query_test_init_integration(data_set)
def test_reboot_multidim_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot with limit queries on a multidimensional "
"dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init_integration(data_set)
def test_reboot_multidim_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot with skip (and limit) queries on a "
"multidimensional dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._query_test_init_integration(data_set)
def test_reboot_multidim_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot with range queries on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._query_test_init_integration(data_set)
def test_reboot_multidim_dataset_range_and_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Reboot with range queries with limits on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_and_limit_queries()
self._query_test_init_integration(data_set)
# Failover Tests
def test_failover_simple_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Failover and limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._failover_cluster(data_set)
def test_failover_simple_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and skip (and limit) queries on a "
"simple dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._failover_cluster(data_set)
def test_failover_simple_dataset_bbox_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and bounding box queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_bbox_queries()
self._failover_cluster(data_set)
def test_failover_simple_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and range queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._failover_cluster(data_set)
def test_failover_multidim_dataset_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and limit queries on a multidimensional "
"dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._failover_cluster(data_set)
def test_failover_multidim_dataset_skip_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and skip (and limit) queries on a "
"multidimensional dataset with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_skip_queries()
self._failover_cluster(data_set)
def test_failover_multidim_dataset_range_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and range queries on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_queries()
self._failover_cluster(data_set)
def test_failover_multidim_dataset_range_and_limit_queries(self):
num_docs = self.helper.input.param("num-docs")
self.log.info("description : Rebalance Out and range queries with limits on a "
"multidimensional with {0} docs".format(num_docs))
data_set = MultidimDataSet(self.helper, num_docs)
data_set.add_range_and_limit_queries()
self._failover_cluster(data_set)
###
# load the data defined for this dataset.
# create views and query the data as it loads.
# verification is optional, and best practice is to
# set to False if you plan on running _query_all_views()
# later in the test case
###
def _query_test_init(self, data_set, verify_results = True):
views = data_set.views
# start loading data
t = Thread(target=data_set.load,
name="load_data_set",
args=())
t.start()
# run queries while loading data
while(t.is_alive()):
self._query_all_views(views, False)
time.sleep(5)
t.join()
# results will be verified if verify_results set
if verify_results:
self._query_all_views(views, verify_results)
else:
self._check_view_integrity(views)
def _query_test_init_integration(self, data_set, verify_results = True):
views = data_set.views
inserted_keys = data_set.load()
target_fn = ()
if self.helper.num_nodes_reboot >= 1:
target_fn = self._reboot_cluster(data_set)
elif self.helper.num_nodes_warmup >= 1:
target_fn = self._warmup_cluster(data_set)
elif self.helper.num_nodes_to_add >= 1 or self.helper.num_nodes_to_remove >= 1:
target_fn = self._rebalance_cluster(data_set)
t = Thread(target=self._query_all_views, args=(views, False))
t.start()
# run queries while loading data
while t.is_alive():
self._rebalance_cluster(data_set)
time.sleep(5)
t.join()
# results will be verified if verify_results set
if verify_results:
self._query_all_views(views, verify_results)
else:
self._check_view_integrity(views)
##
# run all queries for all views in parallel
##
def _query_all_views(self, views, verify_results = True):
query_threads = []
for view in views:
t = RunQueriesThread(view, verify_results)
query_threads.append(t)
t.start()
[t.join() for t in query_threads]
self._check_view_integrity(query_threads)
##
# If an error occurred loading or querying data for a view
# it is queued and checked here. Fail on the first one that
# occurs.
##
def _check_view_integrity(self, thread_results):
for result in thread_results:
if result.test_results.errors:
self.fail(result.test_results.errors[0][1])
if result.test_results.failures:
self.fail(result.test_results.failures[0][1])
###
# Rebalance
###
def _rebalance_cluster(self, data_set):
if self.helper.num_nodes_to_add >= 1:
rebalance = self.cluster.async_rebalance(self.servers[:1],
self.servers[1:self.helper.num_nodes_to_add + 1],
[])
self._query_test_init(data_set)
rebalance.result()
elif self.helper.num_nodes_to_remove >= 1:
rebalance = self.cluster.async_rebalance(self.servers[:1], [],
self.servers[1:self.helper.num_nodes_to_remove + 1])
self._query_test_init(data_set)
rebalance.result()
def _failover_cluster(self, data_set):
failover_nodes = self.servers[1 : self.helper.failover_factor + 1]
try:
# failover and verify loaded data
#self.cluster.failover(self.servers, failover_nodes)
self.cluster.failover(self.servers, self.servers[1:2])
self.log.info("120 seconds sleep after failover before invoking rebalance...")
time.sleep(120)
rebalance = self.cluster.async_rebalance(self.servers,
[], self.servers[1:2])
self._query_test_init(data_set)
msg = "rebalance failed while removing failover nodes {0}".format(failover_nodes)
self.assertTrue(rebalance.result(), msg=msg)
#verify queries after failover
self._query_test_init(data_set)
finally:
self.log.info("Completed the failover testing for spatial querying")
###
# Warmup
###
def _warmup_cluster(self, data_set):
for server in self.servers[0:self.helper.num_nodes_warmup]:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
self.log.info("Node {0} should be warming up ".format(server.ip))
time.sleep(120)
self._query_test_init(data_set)
# REBOOT
def _reboot_cluster(self, data_set):
try:
for server in self.servers[0:self.helper.num_nodes_reboot]:
shell = RemoteMachineShellConnection(server)
if shell.extract_remote_info().type.lower() == 'windows':
o, r = shell.execute_command("shutdown -r -f -t 0")
shell.log_command_output(o, r)
shell.disconnect()
self.log.info("Node {0} is being stopped".format(server.ip))
elif shell.extract_remote_info().type.lower() == 'linux':
o, r = shell.execute_command("reboot")
shell.log_command_output(o, r)
shell.disconnect()
self.log.info("Node {0} is being stopped".format(server.ip))
time.sleep(120)
shell = RemoteMachineShellConnection(server)
command = "/sbin/iptables -F"
o, r = shell.execute_command(command)
shell.log_command_output(o, r)
shell.disconnect()
self.log.info("Node {0} backup".format(server.ip))
finally:
self.log.info("Warming-up server ..".format(server.ip))
time.sleep(100)
class View:
def __init__(self, helper, index_size, fn_str, name='dev_test_view',
create_on_init=True):
self.helper = helper
self.index_size = index_size
self.name = name
self.log = logger.Logger.get_logger()
# Store failures in here. Don't forget to add them manually,
# else the failed assertions won't make the whole test fail
self._test_results = unittest.TestResult()
# queries defined for this view
self.queries = []
if create_on_init:
self.helper.create_index_fun(name, fn_str)
class SimpleDataSet:
def __init__(self, helper, num_docs):
self.helper = helper
self.num_docs = num_docs
self.views = self._create_views()
self.name = "simple_dataset"
def _create_views(self):
view_fn = 'function (doc) {if(doc.geometry !== undefined || doc.name !== undefined ) { emit(doc.geometry, doc.name);}}'
return [View(self.helper, self.num_docs, fn_str = view_fn)]
def load(self):
inserted_keys = self.helper.insert_docs(self.num_docs, self.name)
return inserted_keys
def add_limit_queries(self):
for view in self.views:
view.queries += [
QueryHelper({"limit": 10}, 10),
QueryHelper({"limit": 3417}, 3417),
QueryHelper({"limit": view.index_size}, view.index_size),
QueryHelper({"limit": 5*view.index_size}, view.index_size)]
def add_skip_queries(self):
for view in self.views:
view.queries += [
QueryHelper({"skip": 10}, view.index_size-10),
QueryHelper({"skip": 2985}, view.index_size-2985),
QueryHelper({"skip": view.index_size}, 0),
QueryHelper({"skip": 5*view.index_size}, 0),
QueryHelper({"skip": 2985, "limit": 1539}, 1539),
QueryHelper({"skip": view.index_size-120, "limit": 1539}, 120),
QueryCompareHelper([{"skip": 6210, "limit": 1592}],
[{"skip": 6210, "limit": 1086},
{"skip": 7296, "limit": 506}])
]
def add_bbox_queries(self):
for view in self.views:
view.queries += [
QueryHelper({"bbox": "-180,-90,180,90"}, view.index_size),
QueryHelper({"bbox": "-900,-900,900,900"}, view.index_size),
QueryHelper({}, view.index_size),
QueryHelper({"bbox": "-900,-900,900,900"}, view.index_size),
QueryCompareHelper([{"bbox": "-900,-900,900,900"}],
[{}]),
QueryCompareHelper([{"bbox": "-117,-76,34,43"}],
[{"bbox": "-117,-76,34,-5"},
{"bbox": "-117,-5,34,43"}]),
]
def add_range_queries(self):
for view in self.views:
view.queries += [
QueryHelper(
{"start_range": [-180, -90], "end_range": [180, 90]},
view.index_size),
QueryHelper(
{"start_range": [-900, -900], "end_range": [900, 900]},
view.index_size),
QueryCompareHelper([{"start_range": [-900, -900],
"end_range": [900, 900]}],
[{}]),
QueryCompareHelper([{"start_range": [-117, -76],
"end_range": [34, 43]}],
[{"start_range": [-117, -76],
"end_range": [34, -5]},
{"start_range": [-117, -5],
"end_range": [34, 43]}])
]
def add_all_query_sets(self):
self.add_limit_queries()
self.add_skip_queries()
self.add_bbox_queries()
self.add_range_queries()
class MultidimDataSet:
def __init__(self, helper, num_docs):
self.helper = helper
self.num_docs = num_docs
self.views = self._create_views()
self.name = "multidim_dataset"
def _create_views(self):
view_fn = '''function (doc) {
if (doc.age !== undefined || doc.height !== undefined ||
doc.bloom !== undefined || doc.shed_leaves !== undefined) {
emit([doc.age, doc.height, [doc.bloom, doc.shed_leaves]], doc.name);
}}'''
return [View(self.helper, self.num_docs, fn_str = view_fn)]
def load(self):
inserted_keys = self.helper.insert_docs(self.num_docs, self.name)
return inserted_keys
def add_limit_queries(self):
for view in self.views:
view.queries += [
QueryHelper({"limit": 10}, 10),
QueryHelper({"limit": 3417}, 3417),
QueryHelper({"limit": view.index_size}, view.index_size),
QueryHelper({"limit": 5*view.index_size}, view.index_size)]
def add_skip_queries(self):
for view in self.views:
view.queries += [
QueryHelper({"skip": 10}, view.index_size-10),
QueryHelper({"skip": 2985}, view.index_size-2985),
QueryHelper({"skip": view.index_size}, 0),
QueryHelper({"skip": 5*view.index_size}, 0),
QueryHelper({"skip": 2985, "limit": 1539}, 1539),
QueryHelper({"skip": view.index_size-120, "limit": 1539}, 120),
QueryCompareHelper([{"skip": 6210, "limit": 1592}],
[{"skip": 6210, "limit": 1086},
{"skip": 7296, "limit": 506}])
]
def add_range_queries(self):
for view in self.views:
view.queries += [
QueryHelper(
{"start_range": [0, 0, 0],
"end_range": [1001, 13001, 13]},
view.index_size),
QueryHelper(
{"start_range": [None, 0, None],
"end_range": [1001, None, None]},
view.index_size),
QueryHelper(
{"start_range": [500, 2000, 3],
"end_range": [800, 11111, 9]},
2066),
QueryHelper(
{"start_range": [500, -500, 3],
"end_range": [800, 11111, 9]},
2562),
QueryCompareHelper(
[{"start_range": [500, -500, 3],
"end_range": [800, 11111, 9]}],
[{"start_range": [500, None, 3],
"end_range": [800, 11111, 9]}]),
QueryCompareHelper(
[{"start_range": [500, -500, 3],
"end_range": [800, 11111, 9]}],
[{"start_range": [500, None, 3],
"end_range": [800, None, 9]}]),
QueryCompareHelper(
[{"start_range": [500, 2000, 3],
"end_range": [800, 11111, 9]}],
[{"start_range": [500, 2000, 3],
"end_range": [600, 8000, 9]},
{"start_range": [500, 8000, 3],
"end_range": [600, 11111, 9]},
{"start_range": [600, 2000, 3],
"end_range": [800, 11111, 9]}])
]
def add_range_and_limit_queries(self):
for view in self.views:
view.queries += [
QueryHelper(
{"start_range": [0, 0, 0],
"end_range": [1001, 13001, 13],
"limit": self.num_docs / 2},
self.num_docs / 2),
QueryHelper(
{"start_range": [None, 0, None],
"end_range": [1001, None, None],
"limit": self.num_docs / 2},
self.num_docs / 2),
QueryHelper(
{"start_range": [500, 2000, 3],
"end_range": [800, 11111, 9],
"limit": 1000},
1000),
QueryHelper(
{"start_range": [500, -500, 3],
"end_range": [800, 11111, 9],
"limit": 5},
5),
QueryCompareHelper(
[{"start_range": [500, 1800, 3],
"end_range": [800, 11111, 9]}],
[{"start_range": [500, 1800, 3],
"end_range": [800, 11111, 9],
"limit": 700},
{"start_range": [500, 1800, 3],
"end_range": [800, 11111, 9],
"skip": 700,
"limit": 100},
{"start_range": [500, 1800, 3],
"end_range": [800, 11111, 9],
"skip": 800,
"limit": 10000},
]),
]
def add_all_query_sets(self):
self.add_limit_queries()
self.add_skip_queries()
self.add_range_queries()
self.add_range_and_limit_queries()
class QueryHelper:
def __init__(self, params, expected_num_docs):
self.params = params
# number of docs this query should return
self.expected_num_docs = expected_num_docs
# Put in two lists of queries, it will then join the results of the
# individual queries and compare both
class QueryCompareHelper:
def __init__(self, queries_a, queries_b):
self.queries_a = queries_a
self.queries_b = queries_b
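# Hedged sketch of how these are evaluated (see RunQueriesThread._run_queries
# below): the keys returned by every query in queries_a are concatenated,
# likewise for queries_b, and every key from queries_a is expected to appear
# among the keys from queries_b (the set difference must be empty).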
class RunQueriesThread(threading.Thread):
def __init__(self, view, verify_results = False):
threading.Thread.__init__(self)
self.view = view
self.verify_results = verify_results
# The last retrieved results, useful when an exception happened
self._last_results = None
# Store failures in here. So we can make the whole test fail,
# normally only this thread will fail
self.test_results = unittest.TestResult()
self.helper = self.view.helper
self.log = self.view.log
def run(self):
if not len(self.view.queries) > 0:
self.log.info("No queries to run for this view")
return
try:
self._run_queries()
except Exception:
self.log.error("Last query result:\n\n{0}\n\n"\
.format(json.dumps(self._last_results,
sort_keys=True)))
self.test_results.addFailure(self.helper.testcase, sys.exc_info())
def _run_queries(self):
for query in self.view.queries:
# Simple query
if isinstance(query, QueryHelper):
if self.verify_results:
self._last_results = self._run_query(
query.params, query.expected_num_docs)
else:
self._last_results = self._run_query(query.params)
# Compare queries, don't verify the individual queries
# but only the final result
elif isinstance(query, QueryCompareHelper):
result_keys_a = []
result_keys_b = []
for params in query.queries_a:
self._last_results = self._run_query(params)
result_keys_a.extend(
self.helper.get_keys(self._last_results))
for params in query.queries_b:
self._last_results = self._run_query(params)
result_keys_b.extend(
self.helper.get_keys(self._last_results))
if self.verify_results:
diff = set(result_keys_a) - set(result_keys_b)
self.helper.testcase.assertEqual(diff, set())
else:
self.helper.testcase.fail("unsupported query type: {0}".format(query))
# If expected_num_docs is given, the results are verified
def _run_query(self, query_params, expected_num_docs=None):
params = {"debug": True}
params.update(query_params)
if expected_num_docs is not None:
self.log.info("Quering view {0} with params: {1}".format(
self.view.name, params));
results = self.helper.get_results(self.view.name, None, params)
num_keys = len(self.helper.get_keys(results))
self.log.info("{0}: retrieved value {1} expected: {2}"\
.format(self.view.name, num_keys,
expected_num_docs));
if(num_keys != expected_num_docs):
error = "Query failed: {0} Documents Retrieved, "\
"expected {1}".format(num_keys, expected_num_docs)
try:
self.helper.testcase.assertEqual(num_keys,
expected_num_docs,
error)
except Exception:
self.log.error(error)
raise
else:
return results
else:
# query without verification
self.log.info("Quering view {0} with params: {1}"\
.format(self.view.name, params));
return self.helper.get_results(self.view.name, None, params)
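# Illustrative driver (a hedged sketch; `view` is assumed to be a view object
# carrying the `queries`, `helper`, `log` and `name` attributes used above):
#
#   runner = RunQueriesThread(view, verify_results=True)
#   runner.start()
#   runner.join()
#   assert runner.test_results.wasSuccessful(), runner.test_results.failures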
|
adbclient.py
|
# -*- coding: UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''ADB client, used to communicate with the ADB daemon
'''
import os
import time
import socket, select
import struct
import threading
import cStringIO
from util import logger, TimeoutError
SYNC_DATA_MAX = 64 * 1024
class AdbError(RuntimeError):
pass
class Pipe(object):
'''In-memory pipe emulation
'''
def __init__(self):
self._buffer = cStringIO.StringIO()
self._max_buffer_size = 4096 * 16
self._lock = threading.Lock()
self._pos = 0  # current read position
self._write_buffer = ''  # ensure that only complete lines are written to the buffer
def write(self, s):
self._write_buffer += s
pos = self._write_buffer.rfind('\n')
if pos <= 0: return
s = self._write_buffer[:pos]
self._write_buffer = self._write_buffer[pos:]
with self._lock:
self._buffer.seek(0, 2)  # move the pointer to the end
self._buffer.write(s)
def readline(self):
wait = False
while True:
if wait: time.sleep(0.1)
with self._lock:
self._buffer.seek(0, 2)
buffer_size = self._buffer.tell()
if buffer_size <= self._pos:
wait = True
continue
with self._lock:
self._buffer.seek(self._pos)
ret = self._buffer.readline()
if len(ret) == 0:
wait = True
continue
else:
self._pos = self._buffer.tell()
self._buffer.seek(0, 2)
buffer_size = self._buffer.tell()
if buffer_size >= self._max_buffer_size:
# create a new buffer and drop the data that was already read
self._buffer.seek(self._pos)
buffer = self._buffer.read()
self._buffer.close()
self._buffer = cStringIO.StringIO()
self._buffer.write(buffer)
self._pos = 0
return ret
def read(self):
'''Read all data currently in the pipe
'''
with self._lock:
self._buffer.seek(self._pos)
result = self._buffer.read()
if self._write_buffer:
result += self._write_buffer
self._write_buffer = ''
return result
class ADBPopen(object):
'''Popen-compatible wrapper around an ADB socket
'''
class StdinPipe(object):
'''Writable stdin-like wrapper around the socket
'''
def __init__(self, sock):
self._sock = sock
def write(self, s):
# print 'send', repr(s)
self._sock.send(s)
def flush(self):
pass
def __init__(self, sock, timeout=None):
self._sock = sock
self._stdin = self.StdinPipe(sock)
self._stdout = Pipe()
self._stderr = Pipe()
self._running = True
self._timeout = timeout
if self._timeout == None: self._timeout = 0xFFFFFFFF
self._event = threading.Event()  # set once all data has been received
self._thread = threading.Thread(target=self._work_thread, args=(), name=self.__class__.__name__)
self._thread.setDaemon(True)
self._thread.start()
@property
def stdin(self):
return self._stdin
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
@property
def pid(self):
return self._thread.ident
def _work_thread(self):
time0 = time.time()
while self._running and time.time() - time0 < self._timeout:
infds, outfds, errfds = select.select([self._sock, ], [], [], 1)
if len(infds) > 0:
try:
buff = self._sock.recv(4096)
if len(buff) == 0:
self._sock.close()
self._running = False
self._event.set()
return
self._stdout.write(buff)
except socket.error, e:
logger.info("接收返回数据错误: %s" % (e))
# import traceback
# traceback.print_exc()
self._stdout.write(' ')  # notify the reader to exit
self._sock.close()
self._running = False
self._event.set()
return
self._sock.close()
self._sock = None
def poll(self):
'''Return None while the worker thread is alive, 0 otherwise (Popen-style)
'''
if self._thread.is_alive():
return None
else:
return 0
def terminate(self):
'''Terminate the pseudo process
'''
self._running = False
time.sleep(1)  # wait for the worker thread to exit
def communicate(self):
'''Wait for the command to finish and return (stdout, stderr)
'''
while True:
if self._event.wait(0.001) == True or self.poll() == 0:
if self._running: raise TimeoutError('execute timeout')
return self.stdout.read(), self.stderr.read()
# time.sleep(0.001)
class ADBClient(object):
'''Client for the local ADB server (adb daemon)
'''
instance_dict = {}
def __init__(self, server_addr='127.0.0.1', server_port=5037):
self._server_addr = server_addr
self._server_port = server_port
self._sock = None
self._lock = threading.Lock()
@staticmethod
def get_client(host, port=5037):
'''Get an ADBClient instance for the given host
'''
return ADBClient(host, port)
def call(self, cmd, *args, **kwds):
'''Invoke an ADB command by name
'''
cmd = cmd.replace('-', '_')
if cmd == 'forward' and args[1] == '--remove':
method = getattr(self, 'remove_forward')
args = args[2:]
else:
method = getattr(self, cmd)
# print args
sync = True
if kwds.has_key('sync'): sync = kwds.pop('sync')
if kwds.has_key('timeout') and not cmd in ('shell', 'install', 'uninstall', 'wait_for_device', 'reboot'): kwds.pop('timeout')
if sync:
ret = None
retry_count = kwds.pop('retry_count')
i = 0
socket_error_count = 0
while i < retry_count:
try:
self._lock.acquire()
ret = method(*args, **kwds)
break
except socket.error, e:
logger.exception(u'Error executing %s %s' % (cmd, ' '.join(args)))
socket_error_count += 1
if socket_error_count <= 10: i -= 1
time.sleep(1)
except AdbError, e:
err_msg = str(e)
if 'device not found' in err_msg:
return '', 'error: device not found'
elif 'cannot bind to socket' in err_msg:
return '', err_msg
elif 'cannot remove listener' in err_msg:
return '', err_msg
elif 'device offline' in err_msg:
return '', 'error: device offline'
elif 'Bad response' in err_msg or 'Device or resource busy' in err_msg or 'closed' in err_msg:  # wetest devices sometimes return a "closed" error
    # retry is needed
    logger.exception('Run %s %s failed: %r' % (cmd, ' '.join(args), e))
else:
    raise RuntimeError(u'Command %s %s failed: %s' % (cmd, ' '.join(args), e))
time.sleep(1)
if i >= retry_count - 1: raise e
except RuntimeError, e:
logger.exception(u'Error executing %s %s: %r' % (cmd, ' '.join(args), e))
if 'device not found' in str(e):
self.wait_for_device(args[0], retry_count=1, timeout=300)
self._sock = None
return self.call(cmd, *args, **kwds)
finally:
i += 1
if self._sock != None:
self._sock.close()
self._sock = None
self._lock.release()
if ret == None: raise TimeoutError(u'Run cmd %s %s failed' % (cmd, ' '.join(args)))
if isinstance(ret, basestring):
return ret, ''
else:
return ret
else:
self._transport(args[0])  # asynchronous operations must send the device serial first
if cmd == 'shell':
self._lock.acquire()
self._send_command('shell:' + ' '.join(args[1:]))
pipe = ADBPopen(self._sock)
self._sock = None
self._lock.release()
return pipe
def _connect(self):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for i in range(3):
try:
self._sock.connect((self._server_addr, self._server_port))
return True
except socket.error:
pass
return False
def _check_status(self):
'''Check the response status
'''
stat = self._sock.recv(4)
if stat == "OKAY":
return True
elif stat == "FAIL":
size = int(self._sock.recv(4), 16)
val = self._sock.recv(size)
self._sock.close()
self._sock = None
raise AdbError(val)
else:
raise AdbError("Bad response: %r" % (stat,))
def _send_command(self, cmd):
data = "%04x%s" % (len(cmd), cmd)
if not self._sock: self._connect()
# logger.debug('send: %r' % data)
self._sock.send(data)
return self._check_status()
def _recv(self, size=None):
'''Read data from the socket
'''
result = ''
if size != None:
while len(result) < size:
result += self._sock.recv(size - len(result))
else:
data = self._sock.recv(4096)
while data:
result += data
data = self._sock.recv(4096)
return result
def send_command(self, cmd):
self._send_command(cmd)
size = int(self._sock.recv(4), 16)
resp = self._sock.recv(size)
# logger.debug('recv: %r' % resp[:200])
self._sock.close()
self._sock = None
return resp
def _transport(self, device_id):
self._send_command('host:transport:%s' % device_id)
def devices(self):
'''adb devices
'''
result = self.send_command('host:devices')
return result
def shell(self, device_id, cmd, **kwds):
'''adb shell
'''
cmd_line = 'shell:%s' % cmd
self._transport(device_id)
self._send_command(cmd_line)
return ADBPopen(self._sock, timeout=kwds['timeout']).communicate()
def _sync_read_mode(self, remote_path):
'''Query mode, size and mtime of a remote path (sync STAT request)
'''
data = 'STAT' + struct.pack('I', len(remote_path)) + remote_path
self._sock.send(data)
result = self._sock.recv(16)
if result[:4] != 'STAT':
raise AdbError('sync_read_mode error')
mode, size, mtime = struct.unpack('III', result[4:])  # avoid shadowing the time module
return mode, size, mtime
def pull(self, device_id, src_file, dst_file):
'''adb pull
'''
time0 = time.time()
self._transport(device_id)
self._send_command('sync:')
mode, fsize, ftime = self._sync_read_mode(src_file)
if mode == 0:
self._sock.close()
self._sock = None
raise AdbError('remote object %r does not exist' % src_file)
data = 'RECV' + struct.pack('I', len(src_file)) + src_file
self._sock.send(data)
f = open(dst_file, 'wb')
data_size = 0
last_data = ''
while True:
result = self._sock.recv(8)
if len(result) != 8:
logger.warn('Unexpected response data: %r' % result)
last_data += result
if len(last_data) < 8:
continue
else:
result = last_data[:8]
last_data = last_data[8:]
psize = struct.unpack('I', result[4:])[0]  # size of each data chunk
# print psize
if result[:4] == 'DONE': break
elif result[:4] == 'FAIL':
raise AdbError(self._sock.recv(psize))
elif result[:4] != 'DATA':
raise AdbError('pull_file error')
# print fsize
result = self._recv(psize - len(last_data))
result = last_data + result
if len(result) >= psize:
last_data = result[psize:]
result = result[:psize]
else:
raise ValueError('Data length mismatch, expected: %d actual: %d' % (psize, len(result)))
f.write(result)
data_size += len(result)
f.close()
self._sock.send('QUIT' + struct.pack('I', 0))
time_cost = time.time() - time0
self._sock.close()
self._sock = None
if data_size > 0:
return '%d KB/s (%d bytes in %fs)' % (int(data_size / 1000 / time_cost) if time_cost > 0 else 65535, data_size, time_cost)
else:
return ''
def push(self, device_id, src_file, dst_file):
'''adb push
'''
time0 = time.time()
try:
st = os.stat(src_file)
except WindowsError, e:
if e[0] == 2:
raise AdbError("cannot stat '%s': No such file or directory" % src_file)
else: raise e
self._transport(device_id)
self._send_command('sync:')
mode, fsize, ftime = self._sync_read_mode(dst_file)
# print mode
# print st.st_mode
s = '%s,%d' % (dst_file, st.st_mode)
data = 'SEND' + struct.pack('I', len(s)) + s
self._sock.send(data)
f = open(src_file, 'rb')
data = f.read(SYNC_DATA_MAX)
data_size = 0
while data:
# print 'send', len(data)
send_data = 'DATA' + struct.pack('I', len(data)) + data
self._sock.send(send_data)
data_size += len(data)
data = f.read(SYNC_DATA_MAX)
f.close()
data = 'DONE' + struct.pack('I', st.st_mtime)
self._sock.send(data)
result = self._sock.recv(8)
if result[:4] == 'OKAY':
self._sock.close()
self._sock = None
time_cost = time.time() - time0
return '%d KB/s (%d bytes in %fs)' % (int(data_size / 1000 / time_cost) if time_cost > 0 else 0, data_size, time_cost)
elif result[:4] == 'FAIL':
msg_len = struct.unpack('I', result[4:])[0]
error_msg = self._sock.recv(msg_len)
raise AdbError(error_msg)
else:
raise RuntimeError('unexpected data: %r' % result)
def install(self, device_id, apk_path, args='', **kwds):
'''adb install
'''
if not os.path.exists(apk_path):
raise AdbError("can't find %r to install" % apk_path)
apk_name = os.path.split(apk_path)[-1]
dst_path = '/data/local/tmp/%s' % apk_name
self.push(device_id, apk_path, dst_path)
cmdline = 'pm install ' + (args + ' ' if args else '') + dst_path
return self.shell(device_id, cmdline, **kwds)
def uninstall(self, device_id, package_name, **kwds):
'''adb uninstall
'''
cmd = 'pm uninstall %s' % package_name
return self.shell(device_id, cmd, **kwds)
def forward(self, device_id, local, remote):
'''adb forward
'''
self._send_command('host-serial:%s:forward:%s;%s' % (device_id, local, remote))
return ''
def remove_forward(self, local):
'''adb forward --remove
'''
self._send_command('host:killforward:%s' % (local))
return ''
def create_tunnel(self, device_id, remote_addr):
'''Create a tunnel to a server running on the device
'''
self._transport(device_id)
self._sock.settimeout(2)
try:
self._send_command(remote_addr)
except AdbError, e:
if 'closed' == e.args[0]:
return ''
raise
except socket.timeout, e:
logger.warn('create_tunnel timeout')
return ''
sock = self._sock
self._sock = None
return sock
def get_state(self, device_id):
'''Get the device state
'''
return self.send_command('host-serial:%s:get-state' % (device_id))
def connect(self, device_id):
'''Connect to a device over TCP (adb connect)
'''
result = self.send_command('host:connect:%s' % device_id)
return 'connected to' in result
def reboot(self, device_id, **kwds):
'''Reboot the device
'''
self._transport(device_id)
self._sock.settimeout(kwds['timeout'])
try:
self.send_command('reboot:')
except socket.error, e:
raise e
except:
pass
return True
def wait_for_device(self, device_id, **kwds):
'''Wait for the device to become available
'''
self._send_command('host-serial:%s:wait-for-any' % (device_id))
return ADBPopen(self._sock, timeout=kwds['timeout']).communicate()
def snapshot_screen(self, device_id):
'''Take a screenshot of the device
return: Image.Image
'''
self._transport(device_id)
self._send_command('framebuffer:')
fb_desc = self._sock.recv(13 * 4)
version = struct.unpack_from('I', fb_desc, 0)[0]
bpp = struct.unpack_from('I', fb_desc, 4)[0]
size = struct.unpack_from('I', fb_desc, 8)[0]
width = struct.unpack_from('I', fb_desc, 12)[0]
height = struct.unpack_from('I', fb_desc, 16)[0]
red_offset = struct.unpack_from('I', fb_desc, 20)[0]
red_length = struct.unpack_from('I', fb_desc, 24)[0] # @UnusedVariable
blue_offset = struct.unpack_from('I', fb_desc, 28)[0]
blue_length = struct.unpack_from('I', fb_desc, 32)[0] # @UnusedVariable
green_offset = struct.unpack_from('I', fb_desc, 36)[0]
green_length = struct.unpack_from('I', fb_desc, 40)[0] # @UnusedVariable
alpha_offset = struct.unpack_from('I', fb_desc, 44)[0]
alpha_length = struct.unpack_from('I', fb_desc, 48)[0]
if version != 1:
raise AdbError("unsupported version of framebuffer: %s" % version)
# detect order
util_map = { red_offset: 'R', blue_offset: 'B', green_offset: 'G'}
keys = util_map.keys()
keys.sort()
raw_mode = ''.join([util_map[it] for it in keys])
# detect mode
if alpha_length and alpha_offset:
mode = 'RGBA'
if bpp != 32:
raise AdbError("unsupported RGBA mode, bpp is %s" % bpp)
raw_mode += 'A'
elif alpha_offset:
mode = 'RGBX'
if bpp != 32:
raise AdbError("unsupported RGBX mode, bpp is %s" % bpp)
raw_mode += 'X'
else:
mode = 'RGB'
if bpp == 16:
raw_mode += ';16'
elif bpp == 24:
pass
else:
raise AdbError("unsupported RGB mode, bpp is %s" % bpp)
data = ''
while len(data) < size:
data += self._sock.recv(4096)
from PIL import Image
return Image.frombuffer(mode, (width, height), data, 'raw', raw_mode, 0, 1)
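# Illustrative usage (a hedged sketch; the serial number and paths are placeholders,
# `retry_count` is required by call() and `timeout` is required by shell()):
#
#   client = ADBClient.get_client('127.0.0.1', 5037)
#   print client.call('devices', retry_count=3)[0]
#   out, err = client.call('shell', 'emulator-5554', 'ls /sdcard', retry_count=3, timeout=60)
#   client.call('pull', 'emulator-5554', '/sdcard/log.txt', 'log.txt', retry_count=3)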
if __name__ == '__main__':
pass
|
__init__.py
|
"""
The firebase Python package is a Python interface to Google's Firebase REST APIs.
By Joe Tilsed
"""
import json
import time
import math
import socket
import requests
import datetime
import threading
import python_jwt as jwt
from gcloud import storage
from random import uniform
from requests import Session
from sseclient import SSEClient
from Crypto.PublicKey import RSA
from collections import OrderedDict
from urllib.parse import urlencode, quote
from requests.exceptions import HTTPError
from requests_toolbelt.adapters import appengine
from oauth2client.service_account import ServiceAccountCredentials
from requests.packages.urllib3.contrib.appengine import is_appengine_sandbox
name = 'firebase'
def author():
return "Joe Tilsed - https://linkedin.com/in/JoeTilsed"
class Firebase(object):
""" Firebase Interface """
def __init__(self, config):
self.api_key = config["apiKey"]
self.auth_domain = config["authDomain"]
self.database_url = config["databaseURL"]
self.storage_bucket = config["storageBucket"]
self.credentials = None
self.requests = requests.Session()
if config.get("serviceAccount"):
scopes = [
'https://www.googleapis.com/auth/firebase.database',
'https://www.googleapis.com/auth/userinfo.email',
"https://www.googleapis.com/auth/cloud-platform"
]
service_account_type = type(config["serviceAccount"])
if service_account_type is str:
self.credentials = ServiceAccountCredentials.from_json_keyfile_name(config["serviceAccount"], scopes)
if service_account_type is dict:
self.credentials = ServiceAccountCredentials.from_json_keyfile_dict(config["serviceAccount"], scopes)
if is_appengine_sandbox():
adapter = appengine.AppEngineAdapter(max_retries=3)
else:
adapter = requests.adapters.HTTPAdapter(max_retries=3)
for scheme in ('http://', 'https://'):
self.requests.mount(scheme, adapter)
def auth(self):
return Auth(self.api_key, self.requests, self.credentials)
def database(self):
return Database(self.credentials, self.api_key, self.database_url, self.requests)
def storage(self):
return Storage(self.credentials, self.storage_bucket, self.requests)
class Auth:
""" Authentication Service """
def __init__(self, api_key, requests_session, credentials):
self.api_key = api_key
self.current_user = None
self.requests = requests_session
self.credentials = credentials
def sign_in_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={}".format(
self.api_key
)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
self.current_user = request_object.json()
return self.current_user
def create_custom_token(self, uid, additional_claims=None):
service_account_email = self.credentials.service_account_email
private_key = RSA.importKey(self.credentials._private_key_pkcs8_pem)
payload = {
"iss": service_account_email,
"sub": service_account_email,
"aud": "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit",
"uid": uid
}
if additional_claims:
payload["claims"] = additional_claims
exp = datetime.timedelta(minutes=60)
return jwt.generate_jwt(payload, private_key, "RS256", exp)
def sign_in_with_custom_token(self, token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key={}".format(
self.api_key
)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"returnSecureToken": True, "token": token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def refresh(self, refresh_token):
request_ref = "https://securetoken.googleapis.com/v1/token?key={}".format(self.api_key)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"grantType": "refresh_token", "refreshToken": refresh_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
request_object_json = request_object.json()
return {
"userId": request_object_json["user_id"],
"idToken": request_object_json["id_token"],
"refreshToken": request_object_json["refresh_token"]
}
def get_account_info(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getAccountInfo?key={}".format(
self.api_key
)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_email_verification(self, id_token):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={}".format(
self.api_key
)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "VERIFY_EMAIL", "idToken": id_token})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def send_password_reset_email(self, email):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/getOobConfirmationCode?key={}".format(
self.api_key
)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"requestType": "PASSWORD_RESET", "email": email})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def verify_password_reset_code(self, reset_code, new_password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/resetPassword?key={}".format(
self.api_key
)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"oobCode": reset_code, "newPassword": new_password})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
def create_user_with_email_and_password(self, email, password):
request_ref = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key={}".format(
self.api_key
)
headers = {"content-type": "application/json; charset=UTF-8"}
data = json.dumps({"email": email, "password": password, "returnSecureToken": True})
request_object = requests.post(request_ref, headers=headers, data=data)
raise_detailed_error(request_object)
return request_object.json()
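# Illustrative sign-in flow (a hedged sketch; `firebase` is assumed to be a Firebase
# instance created from a config dict, and the credentials are placeholders):
#
#   auth = firebase.auth()
#   user = auth.sign_in_with_email_and_password("user@example.com", "secret")
#   info = auth.get_account_info(user["idToken"])
#   fresh = auth.refresh(user["refreshToken"])  # ID tokens expire after about an hour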
class Database:
""" Database Service """
def __init__(self, credentials, api_key, database_url, requests_session):
if not database_url.endswith('/'):
url = ''.join([database_url, '/'])
else:
url = database_url
self.credentials = credentials
self.api_key = api_key
self.database_url = url
self.requests = requests_session
self.path = ""
self.build_query = {}
self.last_push_time = 0
self.last_rand_chars = []
def order_by_key(self):
self.build_query["orderBy"] = "$key"
return self
def order_by_value(self):
self.build_query["orderBy"] = "$value"
return self
def order_by_child(self, order):
self.build_query["orderBy"] = order
return self
def start_at(self, start):
self.build_query["startAt"] = start
return self
def end_at(self, end):
self.build_query["endAt"] = end
return self
def equal_to(self, equal):
self.build_query["equalTo"] = equal
return self
def limit_to_first(self, limit_first):
self.build_query["limitToFirst"] = limit_first
return self
def limit_to_last(self, limit_last):
self.build_query["limitToLast"] = limit_last
return self
def shallow(self):
self.build_query["shallow"] = True
return self
def child(self, *args):
new_path = "/".join([str(arg) for arg in args])
if self.path:
self.path += "/{}".format(new_path)
else:
if new_path.startswith("/"):
new_path = new_path[1:]
self.path = new_path
return self
def build_request_url(self, token):
parameters = {}
if token:
parameters['auth'] = token
for param in list(self.build_query):
if type(self.build_query[param]) is str:
parameters[param] = quote('"' + self.build_query[param] + '"')
elif type(self.build_query[param]) is bool:
parameters[param] = "true" if self.build_query[param] else "false"
else:
parameters[param] = self.build_query[param]
request_ref = '{0}{1}.json?{2}'.format(self.database_url, self.path, urlencode(parameters))
self.path = ""
self.build_query = {}
return request_ref
def build_headers(self, token=None):
headers = {"content-type": "application/json; charset=UTF-8"}
if not token and self.credentials:
access_token = self.credentials.get_access_token().access_token
headers['Authorization'] = 'Bearer ' + access_token
return headers
def get(self, token=None, json_kwargs={}):
build_query = self.build_query
query_key = self.path.split("/")[-1]
request_ref = self.build_request_url(token)
headers = self.build_headers(token)
request_object = self.requests.get(request_ref, headers=headers)
raise_detailed_error(request_object)
request_dict = request_object.json(**json_kwargs)
if isinstance(request_dict, list):
return FirebaseResponse(convert_list_to_firebase(request_dict), query_key)
if not isinstance(request_dict, dict):
return FirebaseResponse(request_dict, query_key)
if not build_query:
return FirebaseResponse(convert_to_firebase(request_dict.items()), query_key)
if build_query.get("shallow"):
return FirebaseResponse(request_dict.keys(), query_key)
sorted_response = None
if build_query.get("orderBy"):
if build_query["orderBy"] == "$key":
sorted_response = sorted(request_dict.items(), key=lambda item: item[0])
elif build_query["orderBy"] == "$value":
sorted_response = sorted(request_dict.items(), key=lambda item: item[1])
else:
sorted_response = sorted(request_dict.items(), key=lambda item: item[1][build_query["orderBy"]])
return FirebaseResponse(convert_to_firebase(sorted_response), query_key)
def push(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.post(
request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8")
)
raise_detailed_error(request_object)
return request_object.json()
def set(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.put(
request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8")
)
raise_detailed_error(request_object)
return request_object.json()
def update(self, data, token=None, json_kwargs={}):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.patch(
request_ref, headers=headers, data=json.dumps(data, **json_kwargs).encode("utf-8")
)
raise_detailed_error(request_object)
return request_object.json()
def remove(self, token=None):
request_ref = self.check_token(self.database_url, self.path, token)
self.path = ""
headers = self.build_headers(token)
request_object = self.requests.delete(request_ref, headers=headers)
raise_detailed_error(request_object)
return request_object.json()
def stream(self, stream_handler, token=None, stream_id=None):
request_ref = self.build_request_url(token)
return Stream(request_ref, stream_handler, self.build_headers, stream_id)
@staticmethod
def check_token(database_url, path, token):
if token:
return '{0}{1}.json?auth={2}'.format(database_url, path, token)
else:
return '{0}{1}.json'.format(database_url, path)
def generate_key(self):
push_chars = '-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz'
now = int(time.time() * 1000)
duplicate_time = now == self.last_push_time
self.last_push_time = now
time_stamp_chars = [0] * 8
for i in reversed(range(0, 8)):
time_stamp_chars[i] = push_chars[now % 64]
now = int(math.floor(now / 64))
new_id = "".join(time_stamp_chars)
if not duplicate_time:
for i in range(0, 12):
self.last_rand_chars.append(int(math.floor(uniform(0, 1) * 64)))
else:
for i in range(0, 11):
if self.last_rand_chars[i] == 63:
self.last_rand_chars[i] = 0
self.last_rand_chars[i] += 1
for i in range(0, 12):
new_id += push_chars[self.last_rand_chars[i]]
return new_id
@staticmethod
def sort(origin, by_key):
firebases = origin.each()
new_list = []
for firebase in firebases:
new_list.append(firebase.item)
data = sorted(dict(new_list).items(), key=lambda item: item[1][by_key])
return FirebaseResponse(convert_to_firebase(data), origin.key())
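# Illustrative query-builder usage (a hedged sketch; the child path and field names
# are placeholders, and `firebase` is assumed to be a configured Firebase instance):
#
#   db = firebase.database()
#   users = db.child("users").order_by_child("age").start_at(18).limit_to_first(10).get()
#   for user in users.each():
#       print(user.key(), user.val())
#   new_key = db.generate_key()  # client-side push id, comparable to push()
#   db.child("users").child(new_key).set({"name": "Ada", "age": 36})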
class Storage:
""" Storage Service """
def __init__(self, credentials, storage_bucket, requests):
self.storage_bucket = "https://firebasestorage.googleapis.com/v0/b/" + storage_bucket
self.credentials = credentials
self.requests = requests
self.path = ""
if credentials:
client = storage.Client(credentials=credentials, project=storage_bucket)
self.bucket = client.get_bucket(storage_bucket)
def child(self, *args):
new_path = "/".join(args)
if self.path:
self.path += "/{}".format(new_path)
else:
if new_path.startswith("/"):
new_path = new_path[1:]
self.path = new_path
return self
def put(self, file, token=None):
path = self.path
self.path = None
if isinstance(file, str):
file_object = open(file, 'rb')
else:
file_object = file
request_ref = self.storage_bucket + "/o?name={0}".format(path)
if token:
headers = {"Authorization": "Firebase " + token}
request_object = self.requests.post(request_ref, headers=headers, data=file_object)
raise_detailed_error(request_object)
return request_object.json()
elif self.credentials:
blob = self.bucket.blob(path)
if isinstance(file, str):
return blob.upload_from_filename(filename=file)
else:
return blob.upload_from_file(file_obj=file)
else:
request_object = self.requests.post(request_ref, data=file_object)
raise_detailed_error(request_object)
return request_object.json()
def delete(self, name):
self.bucket.delete_blob(name)
def download(self, filename, token=None):
path = self.path
url = self.get_url(token)
self.path = None
if path.startswith('/'):
path = path[1:]
if self.credentials:
blob = self.bucket.get_blob(path)
blob.download_to_filename(filename)
else:
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r:
f.write(chunk)
def get_url(self, token):
path = self.path
self.path = None
if path.startswith('/'):
path = path[1:]
if token:
return "{0}/o/{1}?alt=media&token={2}".format(self.storage_bucket, quote(path, safe=''), token)
return "{0}/o/{1}?alt=media".format(self.storage_bucket, quote(path, safe=''))
def list_files(self):
return self.bucket.list_blobs()
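# Illustrative storage usage (a hedged sketch; file names and paths are placeholders,
# and `firebase` is assumed to be a configured Firebase instance):
#
#   store = firebase.storage()
#   store.child("images/logo.png").put("logo.png")       # upload a local file
#   url = store.child("images/logo.png").get_url(None)   # public download URL
#   store.child("images/logo.png").download("copy.png")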
def raise_detailed_error(request_object):
try:
request_object.raise_for_status()
except HTTPError as e:
raise HTTPError(e, request_object.text)
def convert_to_firebase(items):
firebase_list = []
for item in items:
firebase_list.append(FirebaseKeyValue(item))
return firebase_list
def convert_list_to_firebase(items):
firebase_list = []
for item in items:
firebase_list.append(FirebaseKeyValue([items.index(item), item]))
return firebase_list
class FirebaseResponse:
def __init__(self, firebases, query_key):
self.firebases = firebases
self.query_key = query_key
def val(self):
if isinstance(self.firebases, list):
firebase_list = []
if isinstance(self.firebases[0].key(), int):
for firebase in self.firebases:
firebase_list.append(firebase.val())
return firebase_list
for firebase in self.firebases:
firebase_list.append((firebase.key(), firebase.val()))
return OrderedDict(firebase_list)
else:
return self.firebases
def key(self):
return self.query_key
def each(self):
if isinstance(self.firebases, list):
return self.firebases
class FirebaseKeyValue:
def __init__(self, item):
self.item = item
def val(self):
return self.item[1]
def key(self):
return self.item[0]
class KeepAuthSession(Session):
"""
A session that doesn't drop Authentication on redirects between domains.
"""
def rebuild_auth(self, prepared_request, response):
pass
class ClosableSSEClient(SSEClient):
def __init__(self, *args, **kwargs):
self.should_connect = True
super(ClosableSSEClient, self).__init__(*args, **kwargs)
def _connect(self):
if self.should_connect:
super(ClosableSSEClient, self)._connect()
else:
raise StopIteration()
def close(self):
self.should_connect = False
self.retry = 0
self.resp.raw._fp.fp.raw._sock.shutdown(socket.SHUT_RDWR)
self.resp.raw._fp.fp.raw._sock.close()
class Stream:
def __init__(self, url, stream_handler, build_headers, stream_id):
self.build_headers = build_headers
self.url = url
self.stream_handler = stream_handler
self.stream_id = stream_id
self.sse = None
self.thread = None
self.start()
@staticmethod
def make_session():
"""
Return a custom session object to be passed to the ClosableSSEClient.
"""
session = KeepAuthSession()
return session
def start(self):
self.thread = threading.Thread(target=self.start_stream)
self.thread.start()
return self
def start_stream(self):
self.sse = ClosableSSEClient(self.url, session=self.make_session(), build_headers=self.build_headers)
for msg in self.sse:
if msg:
msg_data = json.loads(msg.data)
msg_data["event"] = msg.event
if self.stream_id:
msg_data["stream_id"] = self.stream_id
self.stream_handler(msg_data)
def close(self):
while not self.sse and not hasattr(self.sse, 'resp'):
time.sleep(0.001)
self.sse.running = False
self.sse.close()
self.thread.join()
return self
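# Illustrative streaming usage (a hedged sketch; the handler and path are placeholders,
# and `firebase` is assumed to be a configured Firebase instance):
#
#   def handler(message):
#       print(message["event"], message["path"], message["data"])
#
#   stream = firebase.database().child("users").stream(handler, stream_id="users")
#   # later, when updates are no longer needed:
#   stream.close()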
# Original code by https://github.com/thisbejim/Pyrebase/blob/master/pyrebase/pyrebase.py
# That's all folks...
|
__init__.py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
import argparse
import os
import roslib.network
import rospy
import socket
import sys
import threading
from fkie_master_discovery.common import get_hostname
from fkie_node_manager_daemon import host as nmdhost
from fkie_node_manager_daemon.version import detect_version
from .common import get_ros_home
from .history import History
from .name_resolution import NameResolution
from fkie_node_manager.nmd_client import NmdClient
from fkie_node_manager.nmd_client.launch_channel import BinarySelectionRequest, LaunchArgsSelectionRequest
from .progress_queue import InteractionNeededError
from .screen_handler import ScreenHandler, ScreenSelectionRequest, NoScreenOpenLogRequest
from .select_dialog import SelectDialog
from .settings import Settings
from .ssh_handler import SSHhandler, AuthenticationRequest
from .start_handler import StartException
from .start_handler import StartHandler
PKG_NAME = 'fkie_node_manager'
__author__ = "Alexander Tiderko (Alexander.Tiderko@fkie.fraunhofer.de)"
__copyright__ = "Copyright (c) 2012 Alexander Tiderko, Fraunhofer FKIE/CMS"
__license__ = "BSD"
__version__ = "unknown" # git describe --tags --dirty --always
__date__ = "unknown" # git log -1 --date=iso
# PYTHONVER = (2, 7, 1)
# if sys.version_info < PYTHONVER:
# print 'For full scope of operation this application requires python version > %s, current: %s' % (str(PYTHONVER), sys.version_info)
HOSTS_CACHE = dict()
'''
the cache that stores the results of tests whether a host is local or not.
:see: :meth:`is_local`
'''
_LOCK = threading.RLock()
_MAIN_FORM = None
_SETTINGS = None
_NMD_CLIENT = None
_SSH_HANDLER = None
_SCREEN_HANDLER = None
_START_HANDLER = None
_NAME_RESOLUTION = None
_HISTORY = None
_QAPP = None
def settings():
'''
:return: The global settings
:rtype: :class:`fkie_node_manager.settings.Settings`
'''
return _SETTINGS
def nmd():
'''
:return: Node manager daemon client
:rtype: :class:`fkie_node_manager.nmd_client.NmdClient`
'''
return _NMD_CLIENT
def ssh():
'''
:return: The SSH handler to handle the SSH connections
:rtype: :class:`fkie_node_manager.ssh_handler.SSHhandler`
'''
return _SSH_HANDLER
def screen():
'''
:return: The screen handler to the screens.
:rtype: :class:`fkie_node_manager.ScreenHandler`
:see: http://linuxwiki.de/screen
'''
return _SCREEN_HANDLER
def starter():
'''
:return: The start handler to handle the start of new ROS nodes on local or remote machines.
:rtype: :class:`fkie_node_manager.start_handler.StartHandler`
'''
return _START_HANDLER
def nameres():
'''
:return: The name resolution object that translates names to hosts or ROS master URIs.
:rtype: :class:`fkie_node_manager.name_resolution.NameResolution`
'''
return _NAME_RESOLUTION
def history():
'''
:return: The history of entered parameters.
:rtype: :class:`fkie_node_manager.history.History`
'''
return _HISTORY
def is_local(hostname, wait=False):
'''
Test whether the given host name is the name of the local host or not.
:param str hostname: the name or IP of the host
:return: True if the hostname is local or is None
:rtype: bool
:raise Exception: on errors while resolving host
'''
if hostname is None:
return True
with _LOCK:
if hostname in HOSTS_CACHE:
if isinstance(HOSTS_CACHE[hostname], threading.Thread):
return False
return HOSTS_CACHE[hostname]
try:
socket.inet_aton(hostname)
local_addresses = ['localhost'] + roslib.network.get_local_addresses()
# check 127/8 and local addresses
result = hostname.startswith('127.') or hostname in local_addresses
with _LOCK:
HOSTS_CACHE[hostname] = result
return result
except socket.error:
# the hostname must be resolved => do it in a thread
if wait:
result = __is_local(hostname)
return result
else:
thread = threading.Thread(target=__is_local, args=((hostname,)))
thread.daemon = True
with _LOCK:
HOSTS_CACHE[hostname] = thread
thread.start()
return False
def __is_local(hostname):
'''
Test whether the hostname is local or not. Uses socket.gethostbyname().
'''
try:
machine_addr = socket.gethostbyname(hostname)
except socket.gaierror:
with _LOCK:
HOSTS_CACHE[hostname] = False
return False
local_addresses = ['localhost'] + roslib.network.get_local_addresses()
# check 127/8 and local addresses
result = machine_addr.startswith('127.') or machine_addr in local_addresses
with _LOCK:
HOSTS_CACHE[hostname] = result
return result
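# Illustrative behaviour of the host cache (a hedged sketch; host names are placeholders):
#
#   is_local('127.0.0.1')                   # True: IP literals are checked synchronously
#   is_local('robot42.example')             # False on the first call; a background thread
#                                           # resolves the name and fills HOSTS_CACHE
#   is_local('robot42.example', wait=True)  # resolves synchronously instead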
def finish(*arg):
'''
Callback called on exit of the ros node.
'''
rospy.signal_shutdown('')
# close all ssh sessions
global _SSH_HANDLER
if _SSH_HANDLER is not None:
_SSH_HANDLER.close()
_SSH_HANDLER = None
# save the launch history
global _HISTORY
if _HISTORY is not None:
try:
_HISTORY.storeAll()
except Exception as err:
sys.stderr.write("Error while store history: %s" % err)
_HISTORY = None
from fkie_node_manager.main_window import MainWindow
# stop all threads in the main window
global _MAIN_FORM
if isinstance(_MAIN_FORM, MainWindow):
if not hasattr(_MAIN_FORM, "on_finish"):
_MAIN_FORM.close_without_ask = True
if SelectDialog.MODAL_DIALOG is not None:
SelectDialog.MODAL_DIALOG.reject()
_MAIN_FORM.hide()
_MAIN_FORM.close()
def set_terminal_name(name):
'''
Change the terminal name.
:param str name: New name of the terminal
'''
sys.stdout.write("\x1b]2;%s\x07" % name)
def set_process_name(name):
'''
Change the process name.
:param str name: the new process name
'''
try:
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary('libc.so.6')
buff = create_string_buffer(len(name) + 1)
buff.value = name
libc.prctl(15, byref(buff), 0, 0, 0)
except Exception:
try:
import setproctitle
setproctitle.setproctitle(name)
except Exception:
pass
def init_settings():
global _SETTINGS
_SETTINGS = Settings()
def init_globals(masteruri):
'''
:return: True if the masteruri refers to localhost
:rtype: bool
'''
# initialize the global handler
global _NMD_CLIENT
global _SSH_HANDLER
global _SCREEN_HANDLER
global _START_HANDLER
global _NAME_RESOLUTION
global _HISTORY
_NMD_CLIENT = NmdClient()
# _NMD_CLIENT.start()
_SSH_HANDLER = SSHhandler()
_SCREEN_HANDLER = ScreenHandler()
_START_HANDLER = StartHandler()
_NAME_RESOLUTION = NameResolution()
_HISTORY = History()
# test where the roscore is running (local or remote)
__is_local('localhost') # fill cache
return __is_local(get_hostname(masteruri)) # fill cache
def init_arg_parser():
global __version__
parser = argparse.ArgumentParser()
parser.add_argument("--version", action="version", version="%s %s" % ("%(prog)s", __version__))
parser.add_argument("-f", "--file", nargs=1, help="loads the given file as default on start")
parser.add_argument("-m", "--muri", nargs=1, default='', help="starts ROS master with given URI, usefull on hosts "
"with multiple interfaces. ROS_HOSTNAME will be set "
"to the host of this URI, but only if it is not an IP.")
parser.add_argument("-p", "--port", nargs='?', default=22622, type=int, help="port for local monitoring (default: 22622)")
group = parser.add_argument_group('echo')
group.add_argument("--echo", nargs=2, help="starts an echo dialog instead of node manager", metavar=('name', 'type'))
group.add_argument("--hz", action="store_true", help="shows only the Hz value instead of topic content in echo dialog")
group.add_argument("--ssh", action="store_true", help="connects via SSH")
return parser
def init_echo_dialog(prog_name, masteruri, topic_name, topic_type, hz=False, use_ssh=False):
'''
Intialize the environment to start an echo window.
'''
# start ROS-Master, if not currently running
# StartHandler._prepareROSMaster(masteruri)
name = '%s_echo' % prog_name
rospy.init_node(name, anonymous=True, log_level=rospy.INFO)
set_terminal_name(name)
set_process_name(name)
from fkie_node_manager.echo_dialog import EchoDialog
global _SSH_HANDLER
_SSH_HANDLER = SSHhandler()
return EchoDialog(topic_name, topic_type, hz, masteruri, use_ssh=use_ssh)
def init_main_window(prog_name, masteruri, launch_files=[], port=22622):
'''
Initialize the environment to start Node Manager.
'''
# start ROS-Master, if not currently running
StartHandler._prepareROSMaster(masteruri)
# setup the loglevel
log_level = rospy.DEBUG
try:
log_level = getattr(rospy, rospy.get_param('/%s/log_level' % prog_name, "INFO"))
except Exception as err:
print("Error while set the log level: %s\n->INFO level will be used!" % err)
rospy.init_node(prog_name, anonymous=False, log_level=log_level)
set_terminal_name(prog_name)
set_process_name(prog_name)
from fkie_node_manager.main_window import MainWindow
local_master = init_globals(masteruri)
return MainWindow(launch_files, not local_master, port)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%% MAIN %%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def main(name):
'''
Start the NodeManager or EchoDialog.
:param str name: the name propagated to the :meth:`rospy.init_node`
'''
try:
from python_qt_binding.QtGui import QApplication
except Exception:
try:
from python_qt_binding.QtWidgets import QApplication
except Exception:
sys.stderr.write("please install 'python_qt_binding' package!!")
sys.exit(-1)
init_settings()
global __version__
global __date__
__version__, __date__ = detect_version(PKG_NAME)
parser = init_arg_parser()
args = rospy.myargv(argv=sys.argv)
parsed_args = parser.parse_args(args[1:])
if parsed_args.muri:
masteruri = parsed_args.muri[0]
hostname = nmdhost.get_ros_hostname(masteruri)
os.environ['ROS_MASTER_URI'] = masteruri
if hostname:
os.environ['ROS_HOSTNAME'] = hostname
masteruri = settings().masteruri()
# Initialize Qt
global _QAPP
_QAPP = QApplication(sys.argv)
# decide to show main or echo dialog
global _MAIN_FORM
try:
if parsed_args.echo:
_MAIN_FORM = init_echo_dialog(name, masteruri, parsed_args.echo[0],
parsed_args.echo[1], parsed_args.hz,
parsed_args.ssh)
else:
_MAIN_FORM = init_main_window(name, masteruri, parsed_args.file, parsed_args.port)
thread = threading.Thread(target=rospy.spin)
thread.daemon = True
thread.start()
rospy.on_shutdown(finish)
except Exception as err:
import traceback
print(traceback.format_exc())
sys.exit("%s" % err)
exit_code = 0
# resize and show the qt window
if not rospy.is_shutdown():
# change path for access to the images of descriptions
os.chdir(settings().PACKAGE_DIR)
# _MAIN_FORM.resize(1024, 720)
screen_size = QApplication.desktop().availableGeometry()
if (_MAIN_FORM.size().width() >= screen_size.width() or
_MAIN_FORM.size().height() >= screen_size.height() - 24):
_MAIN_FORM.showMaximized()
else:
_MAIN_FORM.show()
exit_code = -1
try:
exit_code = _QAPP.exec_()
if nmd() is not None:
nmd().stop()
except Exception:
if not rospy.is_shutdown():
import traceback
print(traceback.format_exc())
return exit_code
|
gap.py
|
r"""
Interface to GAP
Sage provides an interface to the GAP system. This system provides
extensive group theory, combinatorics, etc.
The GAP interface will only work if GAP is installed on your
computer; this should be the case, since GAP is included with Sage.
The interface offers three pieces of functionality:
#. ``gap_console()`` - A function that dumps you into
an interactive command-line GAP session.
#. ``gap(expr)`` - Evaluation of arbitrary GAP
expressions, with the result returned as a string.
#. ``gap.new(expr)`` - Creation of a Sage object that
wraps a GAP object. This provides a Pythonic interface to GAP. For
example, if ``f=gap.new(10)``, then
``f.Factors()`` returns the prime factorization of
`10` computed using GAP.
First Examples
--------------
We factor an integer using GAP::
sage: n = gap(20062006); n
20062006
sage: n.parent()
Gap
sage: fac = n.Factors(); fac
[ 2, 17, 59, 73, 137 ]
sage: fac.parent()
Gap
sage: fac[1]
2
GAP and Singular
----------------
This example illustrates conversion between Singular and GAP via
Sage as an intermediate step. First we create and factor a Singular
polynomial.
::
sage: singular(389)
389
sage: R1 = singular.ring(0, '(x,y)', 'dp')
sage: f = singular('9*x^16-18*x^13*y^2-9*x^12*y^3+9*x^10*y^4-18*x^11*y^2+36*x^8*y^4+18*x^7*y^5-18*x^5*y^6+9*x^6*y^4-18*x^3*y^6-9*x^2*y^7+9*y^8')
sage: F = f.factorize()
sage: print(F)
[1]:
_[1]=9
_[2]=x^6-2*x^3*y^2-x^2*y^3+y^4
_[3]=-x^5+y^2
[2]:
1,1,2
Next we convert the factor `-x^5+y^2` to a Sage
multivariate polynomial. Note that it is important to let
`x` and `y` be the generators of a polynomial ring,
so the eval command works.
::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: s = F[1][3].sage_polystring(); s
'-x**5+y**2'
sage: g = eval(s); g
-x^5 + y^2
Next we create a polynomial ring in GAP and obtain its
indeterminates::
sage: R = gap.PolynomialRing('Rationals', 2); R
PolynomialRing( Rationals, ["x_1", "x_2"] )
sage: I = R.IndeterminatesOfPolynomialRing(); I
[ x_1, x_2 ]
In order to eval `g` in GAP, we need to tell GAP to view
the variables ``x0`` and ``x1`` as the two
generators of `R`. This is the one tricky part. In the GAP
interpreter the object ``I`` has its own name (which
isn't ``I``). We can access its name using
``I.name()``.
::
sage: _ = gap.eval("x := %s[1];; y := %s[2];;"%(I.name(), I.name()))
Now `x_0` and `x_1` are defined, so we can
construct the GAP polynomial `f` corresponding to
`g`::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: f = gap(str(g)); f
-x_1^5+x_2^2
We can call GAP functions on `f`. For example, we evaluate
the GAP ``Value`` function, which evaluates `f`
at the point `(1,2)`.
::
sage: f.Value(I, [1,2])
3
sage: g(1,2) # agrees
3
Saving and loading objects
--------------------------
Saving and loading GAP objects (using the dumps method, etc.) is
*not* supported, since the output string representation of Gap
objects is sometimes not valid input to GAP. Creating classes that
wrap GAP objects *is* supported, via simply defining a
_gap_init_ member function that returns a string that, when
evaluated in GAP, constructs the object. See
``groups/perm_gps/permgroup.py`` for a nontrivial
example of this.
Long Input
----------
The GAP interface reads in even very long input (using files) in a
robust manner, as long as you are creating a new object.
.. note::
Using ``gap.eval`` for long input is much less robust, and is not
recommended.
::
sage: t = '"%s"'%10^10000 # ten thousand character string.
sage: a = gap(t)
Changing which GAP is used
--------------------------
Use this code to change which GAP interpreter is run. E.g.,
::
import sage.interfaces.gap
sage.interfaces.gap.gap_cmd = "/usr/local/bin/gap"
AUTHORS:
- David Joyner and William Stein: initial version(s)
- William Stein (2006-02-01): modified gap_console command so it uses
exactly the same startup command as Gap.__init__.
- William Stein (2006-03-02): added tab completions: gap.[tab], x =
gap(...), x.[tab], and docs, e.g., gap.function? and x.function?
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
from .expect import Expect, ExpectElement, FunctionElement, ExpectFunction
from sage.env import SAGE_LOCAL, SAGE_EXTCODE, DOT_SAGE
from sage.misc.misc import is_in_string
from sage.misc.superseded import deprecation
from sage.misc.cachefunc import cached_method
from sage.interfaces.tab_completion import ExtraTabCompletion
import re
import os
import pexpect
import time
import platform
import string
GAP_DIR = os.path.join(DOT_SAGE, 'gap')
WORKSPACE = os.path.join(GAP_DIR, 'workspace-%s'%abs(hash(SAGE_LOCAL)))
GAP_BINARY = os.path.join(SAGE_LOCAL, 'bin', 'gap')
first_try = True
gap_cmd = "gap -r"
if platform.processor() == 'ia64' and os.path.exists('/usr/bin/prctl'):
# suppress unaligned access to 0x..., ip=0x... warnings
gap_cmd = 'prctl --unaligned=silent ' + gap_cmd
def gap_command(use_workspace_cache=True, local=True):
if use_workspace_cache:
if local:
return "%s -L %s"%(gap_cmd, WORKSPACE), False
else:
# TO DO: Use remote workspace
return gap_cmd, False
else:
return gap_cmd, True
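# Illustrative return values (a hedged sketch; WORKSPACE depends on the local Sage
# installation and gap_cmd may carry extra flags on some platforms):
#
#   gap_command()                           # -> ('gap -r -L <WORKSPACE>', False)
#   gap_command(use_workspace_cache=False)  # -> ('gap -r', True)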
############ Set the GAP memory pool size
# you should always use get_gap_memory_pool_size() to access this value
gap_memory_pool_size = None
def set_gap_memory_pool_size(size_in_bytes):
"""
Set the desired gap memory pool size.
Subsequently started GAP/libGAP instances will use this as
default. Currently running instances are unchanged.
GAP will only reserve ``size_in_bytes`` address space. Unless you
actually start a big GAP computation, the memory will not be
used. However, corresponding swap space will be reserved so that
GAP will always be able to use the reserved address space if
needed. While nothing is actually written to disc as long as you
don't run a big GAP computation, the reserved swap space will not
be available for other processes.
INPUT:
- ``size_in_bytes`` -- integer. The desired memory pool size.
EXAMPLES::
sage: from sage.interfaces.gap import \
... get_gap_memory_pool_size, set_gap_memory_pool_size
sage: n = get_gap_memory_pool_size()
sage: set_gap_memory_pool_size(n)
sage: n == get_gap_memory_pool_size()
True
sage: n # random output
1534059315
"""
global gap_memory_pool_size
gap_memory_pool_size = size_in_bytes
def get_gap_memory_pool_size():
"""
Get the gap memory pool size for new GAP processes.
EXAMPLES::
sage: from sage.interfaces.gap import \
... get_gap_memory_pool_size
sage: get_gap_memory_pool_size() # random output
1534059315
"""
global gap_memory_pool_size
if gap_memory_pool_size is not None:
return gap_memory_pool_size
from sage.misc.memory_info import MemoryInfo
mem = MemoryInfo()
suggested_size = max(mem.available_swap() // 10,
mem.available_ram() // 50)
# Don't eat all address space if the user set ulimit -v
suggested_size = min(suggested_size, mem.virtual_memory_limit()//10)
# ~220MB is the minimum for long doctests
suggested_size = max(suggested_size, 250 * 1024**2)
return suggested_size
def _get_gap_memory_pool_size_MB():
"""
Return the gap memory pool size suitable for usage on the GAP
command line.
The GAP 4.5.6 command line parser had issues with large numbers, so
we return it in megabytes.
OUTPUT:
String.
EXAMPLES::
sage: from sage.interfaces.gap import \
... _get_gap_memory_pool_size_MB
sage: _get_gap_memory_pool_size_MB() # random output
'1467m'
"""
pool = get_gap_memory_pool_size()
pool = (pool // (1024**2)) + 1
return str(pool)+'m'
############ Classes with methods for both the GAP3 and GAP4 interface
class Gap_generic(ExtraTabCompletion, Expect):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
_identical_function = "IsIdenticalObj"
def _synchronize(self, timeout=0.5, cmd='%s;'):
"""
Synchronize GAP pexpect interface.
See the base method
:meth:`~sage.interfaces.expect.Expect._synchronize` for more
details.
We override this method since we are looking at GAP package
mode output, which is quite different from the normal
(human-readable) interface.
EXAMPLES::
sage: gap('"ok"')
ok
sage: gap._expect.sendline() # now we are out of sync
1
sage: gap._synchronize()
sage: gap(123)
123
"""
if self._expect is None:
return
E = self._expect
from sage.misc.prandom import randrange
rnd = randrange(2147483647)
cmd = str(rnd)+';'
try:
E.sendline(cmd)
E.expect('@[nf][@J\s>]*'+str(rnd), timeout=timeout)
E.send(' ')
E.expect('@i', timeout=timeout)
except pexpect.TIMEOUT:
self.interrupt()
except pexpect.EOF:
self._crash_msg()
self.quit()
def interrupt(self, tries=None, timeout=1, quit_on_fail=True):
"""
Interrupt the GAP process
Gap installs a SIGINT handler, we call it directly instead of
trying to send Ctrl-C. Unlike
:meth:`~sage.interfaces.expect.Expect.interrupt`, we only try
once since we know what we are doing.
Sometimes GAP dies while interrupting.
EXAMPLES::
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False);
''
sage: rc = gap.interrupt(timeout=1)
sage: [ gap(i) for i in range(10) ] # check that it is still working
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TESTS::
sage: gap('"finished computation"'); gap.interrupt(); gap('"ok"')
finished computation
True
ok
"""
E = self._expect
if E is None:
return True
# GAP oddity: If a computation is running and we send Ctrl-C,
# it is stopped as expected. But if we are at the idle prompt,
# nothing is happening UNTIL we run the next command (which is
# then immediately interrupted).
# There is apparently also a race in GAP between the signal
# handler and input, if we don't wait a bit the result is
# unpredictable.
E.sendline(chr(3))
time.sleep(0.1)
E.sendline()
try:
# send a dummy command
E.sendline('224433409;')
# read everything up to the actual output of the command
E.expect('@[nf][@J\s>]*224433409', timeout=timeout)
E.send(' ')
# the following input prompt should be the current input
# prompt but GAP might be too confused to display it
# E.expect('@i', timeout=timeout)
# Ideally, we would be finished here. But sometimes GAP
# thinks it is still inside a do/od block. So we run some
# more plain commands to get back into sync. These might
# either complete successfully (output "@n+<number>") or
# return a "Syntax error: od expected@J@f +<number>"
E.sendline()
time.sleep(0.1)
E.sendline('224433437;')
E.expect('@[nf][@J\s>]*224433437', timeout=timeout)
E.sendline()
time.sleep(0.1)
E.sendline('224433479;')
E.expect('@[nf][@J\s>]*224433479', timeout=timeout)
E.send(' ')
# the following input prompt is now the current input prompt
E.expect('@i', timeout=timeout)
success = True
except (pexpect.TIMEOUT, pexpect.EOF):
# GAP died or hangs indefinitely
success = False
if not success and quit_on_fail:
self.quit()
return success
def _assign_symbol(self):
r"""
Return the assign symbol in GAP.
TESTS::
sage: gap = Gap()
sage: print(gap._assign_symbol())
:=
"""
return ":="
def _quit_string(self):
"""
Returns the string used to quit GAP.
EXAMPLES::
sage: gap._quit_string()
'quit;'
::
sage: g = Gap()
sage: a = g(2); g.is_running()
True
sage: g.quit()
sage: g.is_running()
False
"""
return 'quit;'
def _read_in_file_command(self, filename):
r"""
Returns the command used to read in a file in GAP.
EXAMPLES::
sage: gap._read_in_file_command('test')
'Read("test");'
::
sage: filename = tmp_filename()
sage: f = open(filename, 'w')
sage: f.write('xx := 22;\n')
sage: f.close()
sage: gap.read(filename)
sage: gap.get('xx').strip()
'22'
"""
return 'Read("%s");'%filename
def _continuation_prompt(self):
"""
Returns the continuation prompt in GAP.
EXAMPLES::
sage: gap._continuation_prompt()
'> '
"""
return '> '
def load_package(self, pkg, verbose=False):
"""
Load the Gap package with the given name.
If loading fails, raise a RuntimeError exception.
TESTS::
sage: gap.load_package("chevie")
Traceback (most recent call last):
...
RuntimeError: Error loading Gap package chevie. You may want to install the gap_packages and/or database_gap SPKGs.
"""
if verbose:
print("Loading GAP package %s" % pkg)
x = self.eval('LoadPackage("%s")'%pkg)
if x == 'fail':
raise RuntimeError("Error loading Gap package "+str(pkg)+". "+
"You may want to install the gap_packages and/or database_gap SPKGs.")
def eval(self, x, newlines=False, strip=True, split_lines=True, **kwds):
r"""
Send the code in the string ``x`` to the GAP interpreter and return the
output as a string.
INPUT:
- ``x`` -- string containing GAP code.
- ``newlines`` -- bool (default: ``False``); if ``False``,
remove all backslash-newlines inserted by the GAP output
formatter.
- ``strip`` -- ignored
- ``split_lines`` -- bool (default: True); if True then each
line is evaluated separately. If False, then the whole
block of code is evaluated all at once.
EXAMPLES::
sage: gap.eval('2+2')
'4'
sage: gap.eval('Print(4); #test\n Print(6);')
'46'
sage: gap.eval('Print("#"); Print(6);')
'#6'
sage: gap.eval('4; \n 6;')
'4\n6'
sage: gap.eval('if 3>2 then\nPrint("hi");\nfi;')
'hi'
sage: gap.eval('## this is a test\nPrint("OK")')
'OK'
sage: gap.eval('Print("This is a test. Oh no, a #");# but this is a comment\nPrint("OK")')
'This is a test. Oh no, a #OK'
sage: gap.eval('if 4>3 then')
''
sage: gap.eval('Print("Hi how are you?")')
'Hi how are you?'
sage: gap.eval('fi')
''
"""
# '"
#We remove all of the comments: On each line, we try
#to find a pound sign. If we find it, we check to see if
#it is occurring in a string. If it is not in a string, we
#strip off the comment.
if not split_lines:
input_line=str(x)
else:
input_line = ""
for line in str(x).rstrip().split('\n'):
pound_position = line.find('#')
while pound_position != -1:
if not is_in_string(line, pound_position):
line = line[:pound_position]
pound_position = line.find('#',pound_position+1)
input_line += " "+line
if not input_line.endswith(';'):
input_line += ';'
result = Expect.eval(self, input_line, **kwds)
if not newlines:
result = result.replace("\\\n","")
return result.strip()
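# Low-level evaluation helper: send ``line`` to the GAP subprocess and parse the
# package-mode output stream, separating terminal echo, normal output and error
# output. Returns the pair (stdout, stderr) as strings.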
def _execute_line(self, line, wait_for_prompt=True, expect_eof=False):
if self._expect is None: # interface is down
self._start()
E = self._expect
try:
if len(line) > 4095:
raise RuntimeError("Passing commands this long to gap would hang")
E.sendline(line)
except OSError:
raise RuntimeError("Error evaluating %s in %s"%(line, self))
if wait_for_prompt == False:
return ('','')
if len(line)==0:
return ('','')
try:
terminal_echo = [] # to be discarded
normal_outputs = [] # GAP stdout
error_outputs = [] # GAP stderr
current_outputs = terminal_echo
while True:
x = E.expect_list(self._compiled_full_pattern)
current_outputs.append(E.before)
if x == 0: # @p
if E.after != '@p1.':
print("Warning: possibly wrong version of GAP package interface\n")
print("Crossing fingers and continuing\n")
elif x == 1: #@@
current_outputs.append('@')
elif x == 2: #special char
current_outputs.append(chr(ord(E.after[1:2])-ord('A')+1))
elif x == 3: # garbage collection info, ignore
pass
elif x == 4: # @e -- break loop
E.sendline("quit;")
elif x == 5: # @c completion, doesn't seem to happen when -p is in use
print("I didn't think GAP could do this\n")
elif x == 6: # @f GAP error message
current_outputs = error_outputs;
elif x == 7: # @h help text, but this stopped happening with new help
print("I didn't think GAP could do this")
elif x == 8: # @i awaiting normal input
break;
elif x == 9: # @m finished running a child
pass # there is no need to do anything
elif x==10: #@n normal output line
current_outputs = normal_outputs;
elif x==11: #@r echoing input
current_outputs = terminal_echo
elif x==12: #@sN shouldn't happen
print("Warning: this should never happen")
elif x==13: #@w GAP is trying to send a Window command
print("Warning: this should never happen")
elif x ==14: #@x seems to be safely ignorable
pass
elif x == 15:#@z GAP starting a subprocess
pass # there is no need to do anything
except pexpect.EOF:
if not expect_eof:
raise RuntimeError("Unexpected EOF from %s executing %s"%(self,line))
except IOError:
raise RuntimeError("IO Error from %s executing %s"%(self,line))
return ("".join(normal_outputs),"".join(error_outputs))
def _keyboard_interrupt(self):
"""
TESTS:
We check that the gap interface behaves correctly after an
interrupt::
sage: gap(2)
2
sage: try:
....: alarm(0.5)
....: while True: SymmetricGroup(7).conjugacy_classes_subgroups()
....: except KeyboardInterrupt:
....: pass
sage: gap(2)
2
"""
self.quit()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=True):
"""
Evaluate a line of commands.
REMARK:
By default, a long command (length exceeding ``self._eval_using_file_cutoff``)
is evaluated using :meth:`_eval_line_using_file`.
If the command cannot be evaluated because the interface
has crashed, it is automatically restarted and tried
again *once*.
If the optional ``wait_for_prompt`` is ``False`` then even a very long line
will not be evaluated by :meth:`_eval_line_using_file`, since this does not
support the ``wait_for_prompt`` option.
INPUT:
- ``line`` -- (string) a command.
- ``allow_use_file`` (optional bool, default ``True``) --
allow to evaluate long commands using :meth:`_eval_line_using_file`.
- ``wait_for_prompt`` (optional bool, default ``True``) --
wait until the prompt appears in the sub-process' output.
- ``restart_if_needed`` (optional bool, default ``True``) --
If it is ``True``, the command is evaluated a second time
after restarting the interface, if an ``EOFError`` occurred.
TESTS::
sage: gap._eval_line('2+2;')
'4'
We test the ``wait_for_prompt`` option by sending a command that
creates an infinite loop in the GAP sub-process. But if we don't
wait for the prompt to appear in the output, we can interrupt
the loop without raising a KeyboardInterrupt. At the same time,
we test that the line is not forwarded to :meth:`_eval_line_using_file`,
since that method would not support the ``wait_for_prompt`` option::
sage: cutoff = gap._eval_using_file_cutoff
sage: gap._eval_using_file_cutoff = 4
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: gap._eval_using_file_cutoff = cutoff
The following tests against a bug fixed at :trac:`10296`::
sage: gap(3)
3
sage: gap.eval('quit;')
''
sage: a = gap(3)
** Gap crashed or quit executing '\$sage...:=3;;' **
Restarting Gap and trying again
sage: a
3
"""
#if line.find('\n') != -1:
# raise ValueError, "line must not contain any newlines"
E = None
try:
if self._expect is None:
self._start()
E = self._expect
#import pdb; pdb.set_trace()
if allow_use_file and wait_for_prompt and len(line) > self._eval_using_file_cutoff:
return self._eval_line_using_file(line)
(normal, error) = self._execute_line(line, wait_for_prompt=wait_for_prompt,
expect_eof= (self._quit_string() in line))
if len(error)> 0:
if 'Error, Rebuild completion files!' in error:
error += "\nRunning gap_reset_workspace()..."
self.quit()
gap_reset_workspace()
error = error.replace('\r','')
raise RuntimeError("%s produced error output\n%s\n executing %s"%(self, error,line))
if len(normal) == 0:
return ''
if isinstance(wait_for_prompt, str) and normal.endswith(wait_for_prompt):
n = len(wait_for_prompt)
elif normal.endswith(self._prompt):
n = len(self._prompt)
elif normal.endswith(self._continuation_prompt()):
n = len(self._continuation_prompt())
else:
n = 0
out = normal[:-n]
if len(out) > 0 and out[-1] == "\n":
out = out[:-1]
return out
except (RuntimeError,TypeError) as message:
if 'EOF' in message[0] or E is None or not E.isalive():
print("** %s crashed or quit executing '%s' **" % (self, line))
print("Restarting %s and trying again" % self)
self._start()
if line != '':
return self._eval_line(line, allow_use_file=allow_use_file)
else:
return ''
else:
raise RuntimeError(message)
except KeyboardInterrupt:
self._keyboard_interrupt()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def unbind(self, var):
"""
Clear the variable named var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
sage: gap.unbind('x')
sage: gap.get('x')
Traceback (most recent call last):
...
RuntimeError: Gap produced error output
Error, Variable: 'x' must have a value
...
"""
self.eval('Unbind(%s)'%var)
self.clear(var)
def _contains(self, v1, v2):
"""
EXAMPLES::
sage: Integers = gap('Integers')
sage: two = gap(2)
sage: gap._contains(two.name(), Integers.name())
True
::
sage: 2 in gap('Integers')
True
"""
return self.eval('%s in %s'%(v1,v2)) == "true"
def _true_symbol(self):
"""
Returns the symbol for truth in GAP.
EXAMPLES::
sage: gap._true_symbol()
'true'
sage: gap(2) == gap(2)
True
"""
return "true"
def _false_symbol(self):
"""
Returns the symbol for falsity in GAP.
EXAMPLES::
sage: gap._false_symbol()
'false'
sage: gap(2) == gap(3)
False
"""
return "false"
def _equality_symbol(self):
"""
Returns the symbol for equality in GAP.
EXAMPLES::
sage: gap._equality_symbol()
'='
sage: gap(2) == gap(3)
False
sage: gap(2) == gap(2)
True
"""
return "="
def version(self):
"""
Returns the version of GAP being used.
EXAMPLES::
sage: print(gap.version())
4.8...
"""
return self.eval('VERSION')[1:-1]
def function_call(self, function, args=None, kwds=None):
"""
Calls the GAP function with args and kwds.
EXAMPLES::
sage: gap.function_call('SymmetricGroup', [5])
SymmetricGroup( [ 1 .. 5 ] )
If the GAP function does not return a value, but prints something
to the screen, then a string of the printed output is returned.
::
sage: s = gap.function_call('Display', [gap.SymmetricGroup(5).CharacterTable()])
sage: type(s)
<class 'sage.interfaces.interface.AsciiArtString'>
sage: s.startswith('CT')
True
TESTS:
If the function call is too long, two ``gap.eval`` calls are made
since returned values from commands in a file cannot be handled
properly::
sage: g = Gap()
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_very_very_very_long_name'))
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ () ] )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),SymmetricGroup( [ 1 .. 2 ] )) ]
When the command itself is so long that it warrants use of a temporary
file to be communicated to GAP, this does not cause problems since
the file will contain a single command::
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_name_so_very_very_very_long_that_even_by_itself_will_make_expect_use_a_file'))
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ () ] )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),SymmetricGroup( [ 1 .. 2 ] )) ]
"""
args, kwds = self._convert_args_kwds(args, kwds)
self._check_valid_function_name(function)
#Here we have to do some magic because not all GAP
#functions return a value. If you try to store their
#results to a variable, then GAP will complain. Thus, before
#we evaluate the function, we make it so that the marker string
#is in the 'last' variable in GAP. If the function returns a
#value, then that value will be in 'last', otherwise it will
#be the marker.
marker = '__SAGE_LAST__:="__SAGE_LAST__";;'
cmd = "%s(%s);;"%(function, ",".join([s.name() for s in args]+
['%s=%s'%(key,value.name()) for key, value in kwds.items()]))
if len(marker) + len(cmd) <= self._eval_using_file_cutoff:
# We combine the two commands so we only run eval() once and the
# only output would be from the second command
res = self.eval(marker+cmd)
else:
self.eval(marker)
res = self.eval(cmd)
if self.eval(self._identical_function + '(last,__SAGE_LAST__)') != 'true':
return self.new('last2;')
else:
if res.strip():
from sage.interfaces.expect import AsciiArtString
return AsciiArtString(res)
def get_record_element(self, record, name):
r"""
Return the element of a GAP record identified by ``name``.
INPUT:
- ``record`` -- a GAP record
- ``name`` -- str
OUTPUT:
- :class:`GapElement`
EXAMPLES::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: gap.get_record_element(rec, 'a')
1
sage: gap.get_record_element(rec, 'b')
2
TESTS::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: type(gap.get_record_element(rec, 'a'))
<class 'sage.interfaces.gap.GapElement'>
"""
return self('%s.%s' % (record.name(), name))
class GapElement_generic(ExtraTabCompletion, ExpectElement):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
def __repr__(self):
"""
EXAMPLES::
sage: gap(2)
2
"""
s = ExpectElement.__repr__(self)
if s.find('must have a value') != -1:
raise RuntimeError("An error occurred creating an object in %s from:\n'%s'\n%s"%(self.parent().name(), self._create, s))
return s
def bool(self):
"""
EXAMPLES::
sage: bool(gap(2))
True
sage: gap(0).bool()
False
sage: gap('false').bool()
False
"""
P = self._check_valid()
return self != P(0) and repr(self) != 'false'
def __len__(self):
"""
EXAMPLES::
sage: v = gap('[1,2,3]'); v
[ 1, 2, 3 ]
sage: len(v)
3
len is also called implicitly by if::
sage: if gap('1+1 = 2'):
....: print("1 plus 1 does equal 2")
1 plus 1 does equal 2
::
sage: if gap('1+1 = 3'):
....: print("it is true")
....: else:
....: print("it is false")
it is false
"""
P = self.parent()
if P.eval('%s = true'%self.name()) == 'true':
return 1
elif P.eval('%s = false'%self.name()) == 'true':
return 0
else:
return int(self.Length())
def is_string(self):
"""
Tell whether this element is a string.
EXAMPLES::
sage: gap('"abc"').is_string()
True
sage: gap('[1,2,3]').is_string()
False
"""
return bool(self.IsString())
def _matrix_(self, R):
r"""
Return matrix over the (Sage) ring R determined by self, where self
should be a Gap matrix.
EXAMPLES::
sage: s = gap("(Z(7)^0)*[[1,2,3],[4,5,6]]"); s
[ [ Z(7)^0, Z(7)^2, Z(7) ], [ Z(7)^4, Z(7)^5, Z(7)^3 ] ]
sage: s._matrix_(GF(7))
[1 2 3]
[4 5 6]
::
sage: s = gap("[[1,2], [3/4, 5/6]]"); s
[ [ 1, 2 ], [ 3/4, 5/6 ] ]
sage: m = s._matrix_(QQ); m
[ 1 2]
[3/4 5/6]
sage: parent(m)
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
::
sage: s = gap('[[Z(16),Z(16)^2],[Z(16)^3,Z(16)]]')
sage: s._matrix_(GF(16,'a'))
[ a a^2]
[a^3 a]
"""
v = self.DimensionsMat()
n = int(v[1])
m = int(v[2])
from sage.matrix.matrix_space import MatrixSpace
M = MatrixSpace(R, n, m)
entries = [[R(self[r,c]) for c in range(1,m+1)] for r in range(1,n+1)]
return M(entries)
############
class Gap(Gap_generic):
r"""
Interface to the GAP interpreter.
AUTHORS:
- William Stein and David Joyner
"""
def __init__(self, max_workspace_size=None,
maxread=None, script_subdirectory=None,
use_workspace_cache=True,
server=None,
server_tmpdir=None,
logfile=None,
seed=None):
"""
EXAMPLES::
sage: gap == loads(dumps(gap))
True
"""
self.__use_workspace_cache = use_workspace_cache
cmd, self.__make_workspace = gap_command(use_workspace_cache, server is None)
cmd += " -b -p -T"
if max_workspace_size is None:
max_workspace_size = _get_gap_memory_pool_size_MB()
cmd += ' -o ' + str(max_workspace_size)
cmd += ' -s ' + str(max_workspace_size)
cmd += ' -m 64m ' # attempt at a workaround for http://tracker.gap-system.org/issues/224
cmd += ' ' + os.path.join(SAGE_EXTCODE,'gap','sage.g')
Expect.__init__(self,
name='gap',
prompt='gap> ',
command=cmd,
maxread=maxread,
server=server,
server_tmpdir=server_tmpdir,
script_subdirectory=script_subdirectory,
restart_on_ctrlc=True,
verbose_start=False,
logfile=logfile,
eval_using_file_cutoff=100)
self.__seq = 0
self._seed = seed
def set_seed(self,seed=None):
"""
Set the seed for the GAP interpreter.
The seed should be an integer.
EXAMPLES::
sage: g = Gap()
sage: g.set_seed(0)
0
sage: [g.Random(1,10) for i in range(5)]
[2, 3, 3, 4, 2]
"""
if seed is None:
seed = self.rand_seed()
self.eval("Reset(GlobalMersenneTwister,%d)" % seed)
self.eval("Reset(GlobalRandomSource,%d)" % seed)
self._seed = seed
return seed
def __reduce__(self):
"""
EXAMPLES::
sage: gap.__reduce__()
(<function reduce_load_GAP at 0x...>, ())
sage: f, args = _
sage: f(*args)
Gap
"""
return reduce_load_GAP, tuple([])
def _next_var_name(self):
r"""
Returns the next unused variable name.
Note that names starting with dollar signs are valid GAP
identifiers, but need to be escaped with a backslash starting
with GAP-4.8.
EXAMPLES::
sage: g = Gap()
sage: g._next_var_name()
'\\$sage1'
sage: g(2)^2
4
sage: g._next_var_name()
'\\$sage...'
"""
if len(self._available_vars) != 0:
v = self._available_vars[0]
del self._available_vars[0]
return v
self.__seq += 1
return r'\$sage%s'%self.__seq
def _start(self):
"""
EXAMPLES::
sage: g = Gap()
sage: g.is_running()
False
sage: g._start()
sage: g.is_running()
True
sage: g.quit()
"""
if self.__use_workspace_cache:
try:
# Check to see if we need to auto-regenerate the gap
# workspace, i.e., if the gap script is more recent
# than the saved workspace, which signals that gap has
# been upgraded.
if os.path.getmtime(WORKSPACE) < os.path.getmtime(GAP_BINARY):
raise OSError("GAP workspace too old")
# Set the modification time of the workspace to the
# current time. This ensures the workspace doesn't
# get deleted too soon by gap_reset_workspace().
os.utime(WORKSPACE, None)
except OSError:
gap_reset_workspace(verbose=False)
global first_try
n = self._session_number
try:
Expect._start(self, "Failed to start GAP.")
except Exception:
if self.__use_workspace_cache and first_try:
first_try = False
self.quit()
gap_reset_workspace(verbose=False)
Expect._start(self, "Failed to start GAP.")
self._session_number = n
self.__make_workspace = False
else:
raise
if self.__use_workspace_cache and self.__make_workspace:
self.save_workspace()
# Now, as self._expect exists, we can compile some useful pattern:
self._compiled_full_pattern = self._expect.compile_pattern_list([
'@p\d+\.','@@','@[A-Z]','@[123456!"#$%&][^+]*\+',
'@e','@c','@f','@h','@i','@m','@n','@r','@s\d','@w.*\+','@x','@z'])
# read everything up to the first "ready" prompt
self._expect.expect("@i")
# set random seed
self.set_seed(self._seed)
def _function_class(self):
"""
Returns the GapFunction class.
EXAMPLES::
sage: gap._function_class()
<class 'sage.interfaces.gap.GapFunction'>
::
sage: type(gap.Order)
<class 'sage.interfaces.gap.GapFunction'>
"""
return GapFunction
def cputime(self, t=None):
r"""
Returns the amount of CPU time that the GAP session has used. If
``t`` is not None, then it returns the difference
between the current CPU time and ``t``.
EXAMPLES::
sage: t = gap.cputime()
sage: t #random
0.13600000000000001
sage: gap.Order(gap.SymmetricGroup(5))
120
sage: gap.cputime(t) #random
0.059999999999999998
"""
if t is not None:
return self.cputime() - t
else:
self.eval('_r_ := Runtimes();')
r = sum(eval(self.eval('[_r_.user_time, _r_.system_time, _r_.user_time_children, _r_.system_time_children]')))
return r/1000.0
def save_workspace(self):
r"""
Save the GAP workspace.
TESTS:
We make sure that #9938 (GAP does not start if the path to the GAP
workspace file contains more than 82 characters) is fixed::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = os.path.join(SAGE_TMP, "gap" + "0"*(80-len(SAGE_TMP)))
sage: gap = Gap()
sage: gap('3+2') # long time (4s on sage.math, 2013)
5
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
# According to the GAP Reference Manual,
# [http://www.gap-system.org/Manuals/doc/htm/ref/CHAP003.htm#SSEC011.1]
# SaveWorkspace can only be used at the main gap> prompt. It cannot
# be included in the body of a loop or function, or called from a
# break loop.
from sage.misc.temporary_file import atomic_write
with atomic_write(WORKSPACE) as f:
f.close()
self.eval('SaveWorkspace("%s");'%(f.name), allow_use_file=False)
# Todo -- this -- but there is a tricky "when does it end" issue!
# Maybe do via a file somehow?
def help(self, s, pager=True):
"""
Print help on a given topic.
EXAMPLES::
sage: print(gap.help('SymmetricGroup', pager=False))
<BLANKLINE>
50 Group Libraries
<BLANKLINE>
When you start GAP, it already knows several groups. Currently GAP initially
knows the following groups:
...
"""
if self.is_remote():
tmp_to_use = self._remote_tmpfile()
else:
tmp_to_use = self._local_tmpfile()
self.eval('SetGAPDocTextTheme("none")')
self.eval(r'\$SAGE.tempfile := "%s";'%tmp_to_use)
line = Expect.eval(self, "? %s"%s)
Expect.eval(self, "? 1")
match = re.search("Page from (\d+)", line)
if match is None:
print(line)
else:
(sline,) = match.groups()
if self.is_remote():
self._get_tmpfile()
F = open(self._local_tmpfile(),"r")
help = F.read()
if pager:
from IPython.core.page import page
page(help, start = int(sline)-1)
else:
return help
def set(self, var, value):
"""
Set the variable var to the given value.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
cmd = ('%s:=%s;;' % (var, value)).replace('\n','')
self._eval_line(cmd, allow_use_file=True)
def get(self, var, use_file=False):
"""
Get the string representation of the variable var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
if use_file:
tmp = self._local_tmpfile()
if os.path.exists(tmp):
os.unlink(tmp)
self.eval('PrintTo("%s", %s);'%(tmp,var), strip=False)
r = open(tmp).read()
r = r.strip().replace("\\\n","")
os.unlink(tmp)
return r
else:
return self.eval('Print(%s);'%var, newlines=False)
def _pre_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StartInteract();')
def _post_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StopInteract();')
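# Evaluate a long line by shipping it to GAP in a temporary file. Non-assignment
# commands (no ':=' outside a string literal) are wrapped in Print(...) so that
# their value appears in the output of the file-based evaluation.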
def _eval_line_using_file(self, line):
i = line.find(':=')
if i != -1:
j = line.find('"')
if j >= 0 and j < i:
i = -1
if i == -1:
line0 = 'Print( %s );'%line.rstrip().rstrip(';')
try: # this is necessary, since Print requires something as input, and some functions (e.g., Read) return nothing.
return Expect._eval_line_using_file(self, line0)
except RuntimeError:
return ''
return Expect._eval_line_using_file(self, line)
def console(self):
"""
Spawn a new GAP command-line session.
EXAMPLES::
sage: gap.console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
"""
gap_console()
def _object_class(self):
"""
Returns the GapElement class.
EXAMPLES::
sage: gap._object_class()
<class 'sage.interfaces.gap.GapElement'>
sage: type(gap(2))
<class 'sage.interfaces.gap.GapElement'>
"""
return GapElement
def _function_element_class(self):
"""
Returns the GapFunctionElement class.
EXAMPLES::
sage: gap._function_element_class()
<class 'sage.interfaces.gap.GapFunctionElement'>
sage: type(gap.SymmetricGroup(4).Order)
<class 'sage.interfaces.gap.GapFunctionElement'>
"""
return GapFunctionElement
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: '{}' in gap._tab_completion()
False
sage: c = gap._tab_completion()
sage: len(c) > 100
True
sage: 'Order' in c
True
"""
names = eval(self.eval('NamesSystemGVars()')) + \
eval(self.eval('NamesUserGVars()'))
return [n for n in names if n[0] in string.ascii_letters]
############
def gap_reset_workspace(max_workspace_size=None, verbose=False):
r"""
Call this to completely reset the GAP workspace, which is used by
default when Sage first starts GAP.
The first time you start GAP from Sage, it saves the startup state
of GAP in a file ``$HOME/.sage/gap/workspace-HASH``, where ``HASH``
is a hash of the directory where Sage is installed.
This is useful, since then subsequent startup of GAP is at least 10
times as fast. Unfortunately, if you install any new code for GAP,
it won't be noticed unless you explicitly load it, e.g., with
gap.load_package("my_package")
The packages sonata, guava, factint, gapdoc, grape, design, toric,
and laguna are loaded in all cases before the workspace is saved,
if they are available.
TESTS:
Check that ``gap_reset_workspace`` still works when ``GAP_DIR``
doesn't exist, see :trac:`14171`::
sage: ORIGINAL_GAP_DIR = sage.interfaces.gap.GAP_DIR
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.GAP_DIR = os.path.join(tmp_dir(), "test_gap_dir")
sage: sage.interfaces.gap.WORKSPACE = os.path.join(sage.interfaces.gap.GAP_DIR, "test_workspace")
sage: os.path.isfile(sage.interfaces.gap.WORKSPACE) # long time
False
sage: gap_reset_workspace() # long time
sage: os.path.isfile(sage.interfaces.gap.WORKSPACE) # long time
True
sage: sage.interfaces.gap.GAP_DIR = ORIGINAL_GAP_DIR
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
Check that the race condition from :trac:`14242` has been fixed.
We temporarily need to change the workspace filename. ::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = tmp_filename()
sage: from multiprocessing import Process
sage: import time
sage: gap = Gap() # long time (reset GAP session)
sage: P = [Process(target=gap, args=("14242",)) for i in range(4)]
sage: for p in P: # long time, indirect doctest
....: p.start()
....: time.sleep(0.2)
sage: for p in P: # long time
....: p.join()
sage: os.unlink(sage.interfaces.gap.WORKSPACE) # long time
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
# Make sure GAP_DIR exists
try:
os.makedirs(GAP_DIR)
msg = "It is OK to delete all these cache files. They will be recreated as needed.\n"
open(os.path.join(GAP_DIR, 'README.txt'), 'w').write(msg)
except OSError:
if not os.path.isdir(GAP_DIR):
raise
# Delete all gap workspaces that haven't been used in the last
# week, to avoid needless cruft. I had an install on sage.math
# with 90 of these, since I run a lot of different versions of
# Sage, and it totalled 1.3GB of wasted space! See trac #4936.
# We only do this after creating a new workspace, since this cruft
# issue is only a problem if workspaces get created every so
# often. We don't want to have to do this on every startup.
now = time.time()
for F in os.listdir(GAP_DIR):
if F.startswith('workspace-'):
W = os.path.join(GAP_DIR, F)
try:
age = now - os.path.getatime(W)
if age >= 604800: # 1 week in seconds
os.unlink(W)
except OSError:
# It's not a problem if W doesn't exist, everything
# else is an error.
if os.path.exists(W):
raise
# Create new workspace with filename WORKSPACE
g = Gap(use_workspace_cache=False, max_workspace_size=None)
g.eval('SetUserPreference("HistoryMaxLines", 30)')
for pkg in ['GAPDoc', 'ctbllib', 'sonata', 'guava', 'factint', \
'gapdoc', 'grape', 'design', \
'toric', 'laguna', 'braid']:
# NOTE: Do *not* autoload hap - it screws up PolynomialRing(Rationals,2)
try:
g.load_package(pkg, verbose=verbose)
except RuntimeError as msg:
if verbose:
print('*** %s' % msg)
pass
# end for
g.save_workspace()
g.quit()
class GapElement(GapElement_generic):
def __getitem__(self, n):
"""
EXAMPLES::
sage: a = gap([1,2,3])
sage: a[1]
1
"""
self._check_valid()
if not isinstance(n, tuple):
return self.parent().new('%s[%s]'%(self._name, n))
else:
return self.parent().new('%s%s'%(self._name, ''.join(['[%s]'%x for x in n])))
def str(self, use_file=False):
"""
EXAMPLES::
sage: print(gap(2))
2
"""
if use_file:
P = self._check_valid()
return P.get(self.name(), use_file=True)
else:
return repr(self)
def _latex_(self):
r"""
EXAMPLES::
sage: s = gap("[[1,2], [3/4, 5/6]]")
sage: latex(s)
\left(\begin{array}{rr} 1&2\\ 3/4&\frac{5}{6}\\ \end{array}\right)
"""
P = self._check_valid()
try:
s = P.eval('LaTeXObj(%s)'%self.name())
s = s.replace('\\\\','\\').replace('"','')
s = s.replace('%\\n',' ')
return s
except RuntimeError:
return str(self)
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: s5 = gap.SymmetricGroup(5)
sage: 'Centralizer' in s5._tab_completion()
True
"""
from sage.misc.misc import uniq
P = self.parent()
v = P.eval(r'\$SAGE.OperationsAdmittingFirstArgument(%s)'%self.name())
v = v.replace('Tester(','').replace('Setter(','').replace(')','').replace('\n', '')
v = v.split(',')
v = [ oper.split('"')[1] for oper in v ]
v = [ oper for oper in v if all(ch in string.ascii_letters for ch in oper) ]
v = uniq(v)
return v
class GapFunctionElement(FunctionElement):
def _sage_doc_(self):
"""
EXAMPLES::
sage: print(gap(4).SymmetricGroup._sage_doc_())
<BLANKLINE>
50 Group Libraries
<BLANKLINE>
When you start GAP, it already knows several groups. Currently GAP initially
knows the following groups:
...
"""
M = self._obj.parent()
help = M.help(self._name, pager=False)
return help
class GapFunction(ExpectFunction):
def _sage_doc_(self):
"""
EXAMPLES::
sage: print(gap.SymmetricGroup._sage_doc_())
<BLANKLINE>
50 Group Libraries
<BLANKLINE>
When you start GAP, it already knows several groups. Currently GAP initially
knows the following groups:
...
"""
M = self._parent
help = M.help(self._name, pager=False)
return help
def is_GapElement(x):
"""
Returns True if x is a GapElement.
EXAMPLES::
sage: from sage.interfaces.gap import is_GapElement
sage: is_GapElement(gap(2))
True
sage: is_GapElement(2)
False
"""
return isinstance(x, GapElement)
def gfq_gap_to_sage(x, F):
"""
INPUT:
- ``x`` -- GAP finite field element
- ``F`` -- Sage finite field
OUTPUT: element of ``F``
EXAMPLES::
sage: x = gap('Z(13)')
sage: F = GF(13, 'a')
sage: F(x)
2
sage: F(gap('0*Z(13)'))
0
sage: F = GF(13^2, 'a')
sage: x = gap('Z(13)')
sage: F(x)
2
sage: x = gap('Z(13^2)^3')
sage: F(x)
12*a + 11
sage: F.multiplicative_generator()^3
12*a + 11
TESTS:
Check that :trac:`18048` is fixed::
sage: K.<a> = GF(16)
sage: b = a^2 + a
sage: K(b._gap_())
a^2 + a
AUTHOR:
- David Joyner and William Stein
"""
s = str(x)
if s[:2] == '0*':
return F(0)
i1 = s.index("(")
i2 = s.index(")")
q = eval(s[i1+1:i2].replace('^','**'))
if not F.cardinality().is_power_of(q):
raise ValueError('%r has no subfield of size %r' % (F, q))
if s.find(')^') == -1:
e = 1
else:
e = int(s[i2+2:])
if F.degree() == 1:
g = F(gap.eval('Int(Z(%s))' % q))
elif F.is_conway():
f = (F.cardinality() - 1) // (q - 1)
g = F.multiplicative_generator() ** f
else:
raise ValueError('%r is not prime or defined by a Conway polynomial' % F)
return g**e
def intmod_gap_to_sage(x):
r"""
INPUT:
- x -- Gap integer mod ring element
EXAMPLES::
sage: a = gap(Mod(3, 18)); a
ZmodnZObj( 3, 18 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 18
sage: a = gap(Mod(3, 17)); a
Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(0, 17)); a
0*Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
0
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(3, 65537)); a
ZmodpZObj( 3, 65537 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 65537
"""
from sage.rings.finite_rings.all import FiniteField
from sage.rings.finite_rings.integer_mod import Mod
s = str(x)
m = re.search(r'Z\(([0-9]*)\)', s)
if m:
return gfq_gap_to_sage(x, FiniteField(m.group(1)))
m = re.match(r'Zmod[np]ZObj\( ([0-9]*), ([0-9]*) \)', s)
if m:
return Mod(m.group(1), m.group(2))
raise ValueError("Unable to convert Gap element '%s'" % s)
#############
gap = Gap()
def reduce_load_GAP():
"""
Returns the GAP interface object defined in sage.interfaces.gap.
EXAMPLES::
sage: from sage.interfaces.gap import reduce_load_GAP
sage: reduce_load_GAP()
Gap
"""
return gap
# This is only for backwards compatibility, in order to be able
# to unpickle the invalid objects that are in the pickle jar.
def reduce_load():
"""
This is for backwards compatibility only.
To be precise, it only serves to unpickle the invalid
gap elements that are stored in the pickle jar.
EXAMPLES::
sage: from sage.interfaces.gap import reduce_load
sage: reduce_load()
doctest:...: DeprecationWarning: This function is only used to unpickle invalid objects
See http://trac.sagemath.org/18848 for details.
<repr(<sage.interfaces.gap.GapElement at ...>) failed:
ValueError: The session in which this object was defined is no longer running.>
By :trac:`18848`, pickling actually often works::
sage: loads(dumps(gap([1,2,3])))
[ 1, 2, 3 ]
"""
deprecation(18848, "This function is only used to unpickle invalid objects")
return GapElement(None, None)
def gap_console():
"""
Spawn a new GAP command-line session.
Note that in gap-4.5.7 a workspace cache that was saved without a
command line cannot be used to restore a gap session with a command line.
EXAMPLES::
sage: gap_console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * http://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
TESTS::
sage: import subprocess
sage: from sage.interfaces.gap import gap_command
sage: cmd = 'echo "quit;" | ' + gap_command(use_workspace_cache=False)[0]
sage: gap_startup = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
sage: 'http://www.gap-system.org' in gap_startup
True
sage: 'Error' not in gap_startup
True
sage: 'sorry' not in gap_startup
True
"""
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can use the console only in the terminal. Try %%gap magics instead.')
cmd, _ = gap_command(use_workspace_cache=False)
cmd += ' ' + os.path.join(SAGE_EXTCODE,'gap','console.g')
os.system(cmd)
|
deepwalk.py
|
from gensim.models import Word2Vec
import numpy as np
import multiprocessing as mp
import threading as thread
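# Convenience entry point: run DeepWalk on ``graph`` with the default settings
# below and return the learned node embeddings.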
def deepwalk(graph):
args = DeepWalkSetting()
return DeepWalk_Original(args, embed_dim=args.embed_dim, workers=args.workers, graph=graph, ).get_embeddings()
class DeepWalkSetting:
'''Configuration parameters for DeepWalk.'''
def __init__(self):
self.walk_length = 80
self.number_walks = 10
self.window_size = 10
self.epoch = 5
self.seed = 123
self.workers = 20
self.embed_dim = 128
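# DeepWalk: sample truncated, weight-biased random walks from the graph and train a
# skip-gram Word2Vec model on them to obtain one embedding vector per node.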
class DeepWalk_Original(object):
def __init__(self, deep_walk_arguments, embed_dim, graph, workers):
if len(graph) > 1e6: # for large graph, we generate parts of walks each time and keep updating the model.
iterations = deep_walk_arguments.number_walks
deep_walk_arguments.number_walks = 1
else: # for small graphs, we generate all the paths at once and train the model.
iterations = 1
for i in range(iterations):
all_paths = self.muti_thread_walk(deep_walk_arguments, graph, workers)
# print(all_paths)
# all_paths=self.single_generate_walks(deep_walk_arguments,graph,workers)
if i == 0:
word2vec = Word2Vec(sentences=all_paths, min_count=0, size=embed_dim, sg=1, hs=1,
workers=workers,
window=deep_walk_arguments.window_size, iter=deep_walk_arguments.epoch)
else:
word2vec.train(all_paths, total_examples=word2vec.corpus_count, epochs=deep_walk_arguments.epoch)
embeddings = np.zeros((len(graph), embed_dim))
for word in range(len(graph)):
embeddings[word] = word2vec[str(word)]
self.embeddings = embeddings
def get_embeddings(self):
return self.embeddings
# Single-threaded version of DeepWalk.
# Running everything in one process avoids the errors the multiprocessing version can raise.
def single_generate_walks(self, deep_walk_arguments, graph, workers):
def rnd_walk_workers(graph, permuted_idx, proc_begin, proc_end, return_dict):
walk_length, window_size = (deep_walk_arguments.walk_length, deep_walk_arguments.window_size)
all_paths = []
np.random.seed(deep_walk_arguments.seed)
for _ in range(deep_walk_arguments.number_walks):
for start_idx in permuted_idx[proc_begin: proc_end]:
path = [start_idx]
for _ in range(walk_length):
curr_idx = path[-1]
neigh = []
wgts = []
for key in graph[curr_idx]:
neigh.append(key)
wgts.append(graph[curr_idx][key]['wgt'])
if len(neigh) == 0:
path.append(curr_idx)
else:
wgts = []
for key in graph[curr_idx]:
wgts.append(graph[curr_idx][key]['wgt'])
path.append(np.random.choice(neigh, p= np.asarray(wgts) / float(sum(wgts))))
all_paths.append(list(map(str, path)))
return_dict[proc_begin] = all_paths
# Here we simply skip the multiprocessing and walk the whole graph in this process.
proc_begin=0
proc_end=len(graph)
manager = mp.Manager()
return_dict = manager.dict()
jobs = []
chunk_size = len(graph) // workers
np.random.seed(deep_walk_arguments.seed)
permuted_idx = np.random.permutation(len(graph))
rnd_walk_workers(graph,permuted_idx,proc_begin,proc_end,return_dict)
all_paths = []
key_arr = sorted(return_dict.keys())
np.random.shuffle(key_arr)
for key in key_arr:
all_paths += return_dict[key]
return all_paths
def rnd_walk_workers(self,deep_walk_arguments,graph, permuted_idx, proc_begin, proc_end, return_dict):
walk_length, window_size = (deep_walk_arguments.walk_length, deep_walk_arguments.window_size)
all_paths = []
np.random.seed(deep_walk_arguments.seed)
for _ in range(deep_walk_arguments.number_walks):
for start_idx in permuted_idx[proc_begin: proc_end]:
path = [start_idx]
for _ in range(walk_length):
curr_idx = path[-1]
neigh = []
wgts = []
for key in graph[curr_idx]:
neigh.append(key)
wgts.append(graph[curr_idx][key]['wgt'])
if len(neigh) == 0:
path.append(curr_idx)
else:
wgts = []
for key in graph[curr_idx]:
wgts.append(graph[curr_idx][key]['wgt'])
path.append(np.random.choice(neigh, p=np.asarray(wgts) / float(sum(wgts))))
all_paths.append(list(map(str, path)))
return_dict[proc_begin] = all_paths
# Multi-process version: split the start nodes across the workers.
def muti_thread_walk(self,deep_walk_arguments, graph, workers):
manager = mp.Manager()
return_dict = manager.dict()
jobs = []
chunk_size = len(graph) // workers
np.random.seed(deep_walk_arguments.seed)
permuted_idx = np.random.permutation(len(graph))
for i in range(workers):
proc_begin = i * chunk_size
proc_end = (i + 1) * chunk_size
if i == workers - 1:
proc_end = len(graph)
p = mp.Process(target=self.rnd_walk_workers, args=(deep_walk_arguments,graph, permuted_idx, proc_begin, proc_end, return_dict))
jobs.append(p)
for p in jobs:
p.start()
for proc in jobs:
proc.join()
all_paths = []
key_arr = sorted(return_dict.keys())
np.random.shuffle(key_arr)
for key in key_arr:
all_paths += return_dict[key]
return all_paths
def generate_walks(self, deep_walk_arguments, graph, workers):
def rnd_walk_workers(graph, permuted_idx, proc_begin, proc_end, return_dict):
walk_length, window_size = (deep_walk_arguments.walk_length, deep_walk_arguments.window_size)
all_paths = []
np.random.seed(deep_walk_arguments.seed)
for _ in range(deep_walk_arguments.number_walks):
for start_idx in permuted_idx[proc_begin: proc_end]:
path = [start_idx]
for _ in range(walk_length):
curr_idx = path[-1]
neigh = []
wgts = []
for key in graph[curr_idx]:
neigh.append(key)
wgts.append(graph[curr_idx][key]['wgt'])
if len(neigh) == 0:
path.append(curr_idx)
else:
wgts = []
for key in graph[curr_idx]:
wgts.append(graph[curr_idx][key]['wgt'])
path.append(np.random.choice(neigh, p= np.asarray(wgts) / float(sum(wgts))))
all_paths.append(list(map(str, path)))
return_dict[proc_begin] = all_paths
manager = mp.Manager()
return_dict = manager.dict()
jobs = []
chunk_size = len(graph) // workers
np.random.seed(deep_walk_arguments.seed)
permuted_idx = np.random.permutation(len(graph))
for i in range(workers):
proc_begin = i * chunk_size
proc_end = (i + 1) * chunk_size
if i == workers - 1:
proc_end = len(graph)
p = mp.Process(target=rnd_walk_workers, args=(graph, permuted_idx, proc_begin, proc_end, return_dict))
jobs.append(p)
for p in jobs:
p.start()
for proc in jobs:
proc.join()
all_paths = []
key_arr = sorted(return_dict.keys())
np.random.shuffle(key_arr)
for key in key_arr:
all_paths += return_dict[key]
return all_paths
|
initserver.py
|
import settings
settings.generateConfigFile()
import reddit
import socketserverhandler
import socketservervideogenerator
from time import sleep
import database
import datetime
from threading import Thread
import atexit
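# Fetch the latest AskReddit submissions and store them in the database, updating
# the ones we have already seen; record the time of this refresh.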
def getScripts():
global lastUpdate
print("Grabbing more scripts...")
info = reddit.getInfo('AskReddit', settings.reddit_amount_posts)
new_scripts = len([script for script in info if not script.update])
updating_scripts = len([script for script in info if script.update])
print("Adding %s new scripts, updating %s" % (new_scripts, updating_scripts))
for script in info:
if script.update:
database.updateSubmission(script)
else:
database.addSubmission(script)
lastUpdate = datetime.datetime.now()
lastUpdate = None
def updateScripts():
while True:
sleep(10)
if lastUpdate is None:
getScripts()
now = datetime.datetime.now()
if not lastUpdate.hour == now.hour:
print("Getting more scripts - last update at %s" % lastUpdate)
getScripts()
def init():
socketserverhandler.startServer()
socketservervideogenerator.startVideoGeneratorServer()
thread = Thread(target=updateScripts)
thread.start()
#youtubequeue.initQueue()
#socketclient.connectToServer()
#print(checkValueExists("scriptid", "t5_2qh1i"))
#updateScriptStatus("EDITING", "t5_2qh1i")
#print(getVideoCountFromStatus("RAW"))
#print(getRowCount("scripts"))x
def exit_handler():
print("Safe Exit")
socketserverhandler.socket.close()
socketservervideogenerator.socket.close()
if __name__ == "__main__":
atexit.register(exit_handler)
init()
|
simulator.py
|
from utils import GFrame
from sokoban import *
from utils import *
from state import *
from time import sleep
from threading import Thread, currentThread, active_count
from tkthread import TkThread
import random
class Simulator():
def __init__(self, map="fake", wait_time=1):
self._map = map
self._wait_time = wait_time
self._verbose = False
def _setup(self):
self._frame = GFrame("Sokoban Simulator")
self._game = Sokoban(30, self._map, True)
self._frame.display(self._game)
def verbose(self, b):
self._verbose = b
def swap_map(self, map_file):
self._map = map_file
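# Advance the game by one tick: ask the agent for an action, apply it to the
# player, then move the enemies. Returns the chosen action.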
def _simulate(self, agent):
action = agent.request_action(self._game.get_state())
if self._verbose:
print("Move: ", action)
self._game.move_player(action)
self._game.move_enemies()
return action
def _simulate_tree_search(self, agent, record):
if self._verbose:
print("#------------STARTING SIMULATION------------#")
# We change the way its scored
# The agent just needs to turn on ONE switch by stepping on it
self._game.get_state().update_obtained_points(5) # Give the player boots (ability to activate switches)
# Loop while no switches are activated
while all(not t for t in self._game.get_state().get_switches().values()):
sleep(self._wait_time)
record.append(self._simulate(agent))
if self._verbose:
print("#------------SIMULATION FINISHED------------#")
self._game.set_done(True)
def _simulate_probability_agent(self, agent, sense_func):
if self._verbose:
print("#------------STARTING SIMULATION------------#")
# Simulate the given actions every "speed" seconds
while self._game.get_state().get_mouse_locations():
sleep(self._wait_time)
sense_func(self._game.get_state())
self._simulate(agent)
self._game.set_done(True)
if self._verbose:
print("#------------SIMULATION FINISHED------------#")
def simulate_generic_agent(self, agent):
self._setup()
# Quick Hack to help out when students haven't implemented a function yet
try:
agent.request_action(None)
except AttributeError:
pass
record = []
# Setup the simulation in its own thread
simulation_thread = Thread(target= lambda: self._simulate_tree_search(agent, record), daemon=True)
simulation_thread.start()
# Run the game and frame
self._frame.run()
return record
def simulate_probability_agent(self, agent, sense_func):
self._setup()
# Quick Hack to help out when students haven't implemented a function yet
try:
sense_func(None)
except AttributeError:
pass
# Setup the simulation in its own thread
simulation_thread = Thread(target = lambda : self._simulate_probability_agent(agent, sense_func), daemon=True)
simulation_thread.start()
# Run the game and frame
self._frame.run()
return self._game.get_state().get_score()
|
beerchain_transaction_receipt_origin_contract_address.py
|
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
import threading
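# Helper run on a background thread: call waitforlogs for the given contract address
# and stash the result on the node object so the main thread can inspect it after join().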
def waitforlogs(node, contract_address):
logs = node.cli.waitforlogs(node.cli.getblockcount()-1, 1000, '{"addresses": ["'+contract_address+'"]}')
node.result = logs
class BeerchainTransactionReceiptOriginContractAddressTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-logevents', '-txindex']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.node = self.nodes[0]
self.nodes[0].generate(10 + COINBASE_MATURITY)
"""
pragma solidity ^0.5.2;
contract Test {
event TestEvent();
address private child;
function setChildContract(address childContractAddress) external {
child = childContractAddress;
}
function doEvent() external {
if(child == address(0x0)) {
emit TestEvent();
} else {
Test(child).doEvent();
}
}
function getChildAddress() public view returns(address) {
return child;
}
}
"""
"""
Function signatures:
afd67ce7: doEvent()
bcb1c3a9: getChildAddress()
f8d86e18: setChildContract(address)
"""
# Set up a chain of 10 contracts that reference their child contract. I.e. the tenth contract is the leaf
contracts = []
contract_bytecode = "608060405234801561001057600080fd5b506102b8806100206000396000f3fe608060405234801561001057600080fd5b506004361061005e576000357c010000000000000000000000000000000000000000000000000000000090048063afd67ce714610063578063bcb1c3a91461006d578063f8d86e18146100b7575b600080fd5b61006b6100fb565b005b610075610220565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6100f9600480360360208110156100cd57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610249565b005b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161415610182577f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405160405180910390a161021e565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663afd67ce76040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401600060405180830381600087803b15801561020757600080fd5b5060325a03f115801561021957600080fd5b505050505b565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505056fea165627a7a723058203cf61a18e40f6e2bd01b2f7bd607c6e6aff032f12bd5e3eca68212d2e2c80dbf0029"
for i in range(10):
contracts.append(self.nodes[0].createcontract(contract_bytecode)['address'])
self.node.generate(1)
if len(contracts) > 1:
self.node.sendtocontract(contracts[-2], "f8d86e18" + (contracts[-1].zfill(64)), 0, 1000000)
self.node.generate(1)
# Run the doEvent function recursively starting at the root contract and make sure that no event entries are in the waitforlogs results for the first 9 contracts
for contract_address in contracts[:-1]:
thread = threading.Thread(target=waitforlogs, args=(self.node, contract_address))
thread.start()
txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
self.node.generate(7)
thread.join()
receipt = self.node.gettransactionreceipt(txid)
assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
assert_equal(len(self.node.result['entries']), 0)
# Do the same thing again but make sure that the event triggers for the "leaf" (10th) contract
thread = threading.Thread(target=waitforlogs, args=(self.node, contracts[-1]))
thread.start()
txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
self.node.generate(7)
thread.join()
receipt = self.node.gettransactionreceipt(txid)
assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
assert_equal(len(self.node.result['entries']), 1)
if __name__ == '__main__':
BeerchainTransactionReceiptOriginContractAddressTest().main()
|
test_serializable_fails_if_update_same_rows.py
|
##############
# Setup Django
import django
django.setup()
#############
# Test proper
import threading
import time
import pytest
from django.db import DatabaseError, connection, transaction
from django.db.models import F, Subquery
from app.models import Sock
@pytest.mark.django_db
def test_serializable_fails_if_update_same_rows():
def create():
Sock.objects.all().delete()
Sock.objects.create(id_a=1, id_b=1, colour='black')
create_thread = threading.Thread(target=create)
create_thread.start()
create_thread.join()
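# Three barriers choreograph the two sessions: the serializable transaction reads the row,
# the autocommit session then saves it, and the serializable transaction saves it last,
# which must fail with a serialization error.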
barrier_1 = threading.Barrier(2)
barrier_2 = threading.Barrier(2)
barrier_3 = threading.Barrier(2)
def update_autocommit():
sock = Sock.objects.get(id_a=1)
barrier_1.wait()
barrier_2.wait()
sock.save()
barrier_3.wait()
caught = None
def update_serializable():
nonlocal caught
try:
with transaction.atomic():
cursor = connection.cursor()
cursor.execute('SET TRANSACTION ISOLATION LEVEL SERIALIZABLE')
barrier_1.wait()
sock = Sock.objects.get(id_a=1)
barrier_2.wait()
barrier_3.wait()
sock.save()
except Exception as exception:
caught = exception
update_autocommit_thread = threading.Thread(target=update_autocommit)
update_autocommit_thread.start()
update_serializable_thread = threading.Thread(target=update_serializable)
update_serializable_thread.start()
update_autocommit_thread.join()
update_serializable_thread.join()
assert isinstance(caught, DatabaseError)
assert 'could not serialize access due to concurrent update' in caught.args[0]
|
server_lifecycle.py
|
from threading import Thread
import src.datastore
def on_server_loaded(server_context):
''' If present, this function is called when the server first starts. '''
t = Thread(target=src.datastore.load, args=())
t.setDaemon(True)
t.start()
def on_server_unloaded(server_context):
''' If present, this function is called when the server shuts down. '''
pass
def on_session_created(session_context):
''' If present, this function is called when a session is created. '''
pass
def on_session_destroyed(session_context):
''' If present, this function is called when a session is closed. '''
pass
|
langserver_ext.py
|
import logging
import subprocess
import threading
from tornado import ioloop, process, web, websocket
from pyls_jsonrpc import streams
try:
import ujson as json
except Exception: # pylint: disable=broad-except
import json
log = logging.getLogger(__name__)
class LanguageServerWebSocketHandler(websocket.WebSocketHandler):
"""Setup tornado websocket handler to host an external language server."""
writer = None
def open(self, *args, **kwargs):
log.info("Spawning pyls subprocess")
# Create an instance of the language server
proc = process.Subprocess(
['pylsp', '-v'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
# Create a writer that formats json messages with the correct LSP headers
self.writer = streams.JsonRpcStreamWriter(proc.stdin)
# Create a reader for consuming stdout of the language server. We need to
# consume this in another thread
def consume():
# Start a tornado IOLoop for reading/writing to the process in this thread
ioloop.IOLoop()
reader = streams.JsonRpcStreamReader(proc.stdout)
reader.listen(lambda msg: self.write_message(json.dumps(msg)))
thread = threading.Thread(target=consume)
thread.daemon = True
thread.start()
def on_message(self, message):
"""Forward client->server messages to the endpoint."""
print(message)
self.writer.write(json.loads(message))
def check_origin(self, origin):
return True
if __name__ == "__main__":
app = web.Application([
(r"/python", LanguageServerWebSocketHandler),
])
print("APP LISTENING ON PORT 4000")
app.listen(4000, address='0.0.0.0')
ioloop.IOLoop.current().start()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import bz2
import hashlib
import io
import json
import os
import queue
import random
import select
import socket
import subprocess
import sys
import tempfile
import threading
import time
from collections import namedtuple
from datetime import datetime
from functools import partial
from typing import Any, Dict
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import (ABNF, WebSocketException, WebSocketTimeoutException,
create_connection)
import cereal.messaging as messaging
from cereal import log
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.file_helpers import CallbackReader
from common.params import Params
from common.realtime import sec_since_boot, set_core_affinity
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.statsd import STATS_DIR
from selfdrive.swaglog import SWAGLOG_DIR, cloudlog
from selfdrive.version import get_commit, get_origin, get_short_branch, get_version
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = {8022}
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
MAX_AGE = 31 * 24 * 3600 # seconds
WS_FRAME_SIZE = 4096
NetworkType = log.DeviceState.NetworkType
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
low_priority_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress', 'allow_cellular'], defaults=(0, False, 0, False))
cur_upload_items: Dict[int, Any] = {}
def strip_bz2_extension(fn):
if fn.endswith('.bz2'):
return fn[:-4]
return fn
class AbortTransferException(Exception):
pass
class UploadQueueCache():
params = Params()
@staticmethod
def initialize(upload_queue):
try:
upload_queue_json = UploadQueueCache.params.get("AthenadUploadQueue")
if upload_queue_json is not None:
for item in json.loads(upload_queue_json):
upload_queue.put(UploadItem(**item))
except Exception:
cloudlog.exception("athena.UploadQueueCache.initialize.exception")
@staticmethod
def cache(upload_queue):
try:
items = [i._asdict() for i in upload_queue.queue if i.id not in cancelled_uploads]
UploadQueueCache.params.put("AthenadUploadQueue", json.dumps(items))
except Exception:
cloudlog.exception("athena.UploadQueueCache.cache.exception")
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
threading.Thread(target=stat_handler, args=(end_event,), name='stat_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
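# Hedged illustration (not part of the original module): the kind of JSON-RPC 2.0
# request string jsonrpc_handler expects to find on recv_queue, and the rough shape
# of the response it puts on send_queue. Method name and id are arbitrary examples.
def _example_jsonrpc_request(method="getVersion", request_id=0):
  # json is imported at the top of this file; the dispatcher resolves the method name.
  return json.dumps({"method": method, "jsonrpc": "2.0", "id": request_id})
# The matching response string looks roughly like:
#   {"jsonrpc": "2.0", "id": 0, "result": {"version": "...", "remote": "...", ...}}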
def retry_upload(tid: int, end_event: threading.Event, increase_count: bool = True) -> None:
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
new_retry_count = item.retry_count + 1 if increase_count else item.retry_count
item = item._replace(
retry_count=new_retry_count,
progress=0,
current=False
)
upload_queue.put_nowait(item)
UploadQueueCache.cache(upload_queue)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
def upload_handler(end_event: threading.Event) -> None:
sm = messaging.SubMaster(['deviceState'])
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
# Remove item if too old
age = datetime.now() - datetime.fromtimestamp(cur_upload_items[tid].created_at / 1000)
if age.total_seconds() > MAX_AGE:
cloudlog.event("athena.upload_handler.expired", item=cur_upload_items[tid], error=True)
continue
# Check if uploading over metered connection is allowed
sm.update(0)
metered = sm['deviceState'].networkMetered
network_type = sm['deviceState'].networkType.raw
if metered and (not cur_upload_items[tid].allow_cellular):
retry_upload(tid, end_event, False)
continue
try:
def cb(sz, cur):
# Abort transfer if connection changed to metered after starting upload
sm.update(0)
metered = sm['deviceState'].networkMetered
if metered and (not cur_upload_items[tid].allow_cellular):
raise AbortTransferException
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
fn = cur_upload_items[tid].path
try:
sz = os.path.getsize(fn)
except OSError:
sz = -1
cloudlog.event("athena.upload_handler.upload_start", fn=fn, sz=sz, network_type=network_type, metered=metered, retry_count=cur_upload_items[tid].retry_count)
response = _do_upload(cur_upload_items[tid], cb)
if response.status_code not in (200, 201, 401, 403, 412):
cloudlog.event("athena.upload_handler.retry", status_code=response.status_code, fn=fn, sz=sz, network_type=network_type, metered=metered)
retry_upload(tid, end_event)
else:
cloudlog.event("athena.upload_handler.success", fn=fn, sz=sz, network_type=network_type, metered=metered)
UploadQueueCache.cache(upload_queue)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError):
cloudlog.event("athena.upload_handler.timeout", fn=fn, sz=sz, network_type=network_type, metered=metered)
retry_upload(tid, end_event)
except AbortTransferException:
cloudlog.event("athena.upload_handler.abort", fn=fn, sz=sz, network_type=network_type, metered=metered)
retry_upload(tid, end_event, False)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
path = upload_item.path
compress = False
# If file does not exist, but does exist without the .bz2 extension we will compress on the fly
if not os.path.exists(path) and os.path.exists(strip_bz2_extension(path)):
path = strip_bz2_extension(path)
compress = True
with open(path, "rb") as f:
if compress:
cloudlog.event("athena.upload_handler.compress", fn=path, fn_orig=upload_item.path)
data = bz2.compress(f.read())
size = len(data)
data = io.BytesIO(data)
else:
size = os.fstat(f.fileno()).st_size
data = f
if callback:
data = CallbackReader(data, callback, size)
return requests.put(upload_item.url,
data=data,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion() -> Dict[str, str]:
return {
"version": get_version(),
"remote": get_origin(''),
"branch": get_short_branch(''),
"commit": get_commit(default=''),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
destination = {
"latitude": latitude,
"longitude": longitude,
"place_name": place_name,
"place_details": place_details,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
# if prefix is a partial file name, prefix with start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
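# Illustrative examples of the prefix matching above (paths are hypothetical):
#   prefix 'routes/ab' -> recurses into 'routes/' (the prefix starts with that
#                         directory's relative path) and keeps e.g. 'routes/abc.bz2'
#   prefix ''          -> matches every file under ROOT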
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
return uploadFilesToUrls([{
"fn": fn,
"url": url,
"headers": headers,
}])
@dispatcher.add_method
def uploadFilesToUrls(files_data):
items = []
failed = []
for file in files_data:
fn = file.get('fn', '')
if len(fn) == 0 or fn[0] == '/' or '..' in fn or 'url' not in file:
failed.append(fn)
continue
path = os.path.join(ROOT, fn)
if not os.path.exists(path) and not os.path.exists(strip_bz2_extension(path)):
failed.append(fn)
continue
item = UploadItem(
path=path,
url=file['url'],
headers=file.get('headers', {}),
created_at=int(time.time() * 1000),
id=None,
allow_cellular=file.get('allow_cellular', False),
)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
items.append(item._asdict())
UploadQueueCache.cache(upload_queue)
resp = {"enqueued": len(items), "items": items}
if failed:
resp["failed"] = failed
return resp
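# Illustrative shape of the files_data argument accepted above; every value is a
# placeholder, not taken from the original file:
#   [{"fn": "some/relative/path/qlog.bz2",
#     "url": "https://upload.example.com/qlog.bz2",
#     "headers": {"Content-Type": "application/octet-stream"},
#     "allow_cellular": False}]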
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
if not isinstance(upload_id, list):
upload_id = [upload_id]
uploading_ids = {item.id for item in list(upload_queue.queue)}
cancelled_ids = uploading_ids.intersection(upload_id)
if len(cancelled_ids) == 0:
return 404
cancelled_uploads.update(cancelled_ids)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
return {"success": 1}
@dispatcher.add_method
def setBandwithLimit(upload_speed_kbps, download_speed_kbps):
if not TICI:
return {"success": 0, "error": "only supported on comma three"}
try:
HARDWARE.set_bandwidth_limit(upload_speed_kbps, download_speed_kbps)
return {"success": 1}
except subprocess.CalledProcessError as e:
return {"success": 0, "error": "failed to set limit", "stdout": e.stdout, "stderr": e.stderr}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
dongle_id = Params().get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(False)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworkMetered():
network_type = HARDWARE.get_network_type()
return HARDWARE.get_network_metered(network_type)
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import jpeg_write, snapshot
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path) as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def stat_handler(end_event):
  last_scan = 0
  while not end_event.is_set():
    curr_scan = sec_since_boot()
try:
if curr_scan - last_scan > 10:
stat_filenames = list(filter(lambda name: not name.startswith(tempfile.gettempprefix()), os.listdir(STATS_DIR)))
if len(stat_filenames) > 0:
stat_path = os.path.join(STATS_DIR, stat_filenames[0])
with open(stat_path) as f:
jsonrpc = {
"method": "storeStats",
"params": {
"stats": f.read()
},
"jsonrpc": "2.0",
"id": stat_filenames[0]
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
os.remove(stat_path)
last_scan = curr_scan
except Exception:
cloudlog.exception("athena.stat_handler.exception")
time.sleep(0.1)
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = low_priority_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
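# For illustration only: the backoff grows exponentially with the retry count and
# is capped at 128 seconds, e.g. backoff(0) == 0, backoff(3) is in [0, 8),
# backoff(10) is in [0, 128).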
def main():
try:
set_core_affinity([0, 1, 2, 3])
except Exception:
cloudlog.exception("failed to set core affinity")
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
UploadQueueCache.initialize(upload_queue)
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
params.delete("PrimeRedirected")
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
except socket.timeout:
try:
r = requests.get("http://api.commadotai.com/v1/me", allow_redirects=False,
headers={"User-Agent": f"openpilot-{get_version()}"}, timeout=15.0)
if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
params.put_bool("PrimeRedirected", True)
except Exception:
cloudlog.exception("athenad.socket_timeout.exception")
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
runtime.py
|
import os
import sys
import re
import subprocess
import shlex
import fcntl
import time
import threading
def pipe_stdin_writer(pipe, buf):
pipe.stdin.write(buf)
pipe.stdin.close()
def runner_wrap(pipes, main, piping, outputs, pipe_timeout, pipe_timeout_is_error):
r = Runner(pipes, main, piping, outputs=outputs,
pipe_timeout=pipe_timeout, pipe_timeout_is_error=pipe_timeout_is_error)
class Runner:
def __init__(self, pipes, main, piping, outputs={}, pipe_timeout=10, pipe_timeout_is_error=False):
self.pipes = pipes
self.main = main
self.piping = piping
self.pipe_timeout = pipe_timeout
self.pipe_timeout_is_error = pipe_timeout_is_error
self.outputs = outputs
self.piped = []
self.count = 0
self.namespace = None
self.func = None
self.execf = None
self.var = None
self.input_value = b''
self.pipe_threads = []
self.run()
def _error(self, enumber, msg):
print('E-RUNTIME(%d): %s' % (enumber, msg))
sys.exit(1)
def _handle_pipe(self, m, p):
if p['type'] != 'pipe':
return False
self.namespace = p['namespace']
self.func = p['func']
self.execf = self.pipes[self.namespace]['pipes'][self.func]['path']
args = ''
for i in p['args']:
args += i[1]+' '
if len(self.piped) < 1:
inp = subprocess.PIPE
else:
inp = self.piped[len(self.piped)-1].stdout
pipe = subprocess.Popen(shlex.split(r'%s %s' % (self.execf, args)),
stdin=inp, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
thread = None
## NOTE: Writing to pipe stdin will block if len is too large. We must create a new thread to operate
## independently so we can write+read at the same time through the start to end of pipeline
if self.input_value:
thread = threading.Thread(target=pipe_stdin_writer, args=(pipe, self.input_value,))
thread.start()
self.pipe_threads.append(thread)
self.input_value = b''
self.piped.append(pipe)
if self.count >= len(m):
output = b''
# use nonblocking for timeout on read (can happen on no input)
fl = fcntl.fcntl(self.piped[-1].stdout, fcntl.F_GETFL)
fcntl.fcntl(self.piped[-1].stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)
then = time.time()
while 1:
try:
b = self.piped[-1].stdout.read(8192)
if not b: break
sys.stdout.write(b)
sys.stdout.flush()
then = time.time()
except:
now = time.time()
if now - then > self.pipe_timeout:
if self.pipe_timeout_is_error:
self._error(10, 'Timeout reading pipe')
break
sys.stdout.flush()
return True
def _handle_var_end(self, m, p):
if not p['type'] == 'var' or not len(self.piped) > 0:
return False
output = b''
fl = fcntl.fcntl(self.piped[-1].stdout, fcntl.F_GETFL)
fcntl.fcntl(self.piped[-1].stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)
then = time.time()
while 1:
try:
b = self.piped[-1].stdout.read(1024)
if not b: break
output += b
then = time.time()
except:
now = time.time()
if now - then > self.pipe_timeout:
if self.pipe_timeout_is_error:
self._error(10, 'Timeout reading pipe')
break
self.var = p['value']
self.namespace = p['namespace']
if not self.namespace in self.outputs:
self.outputs[self.namespace] = {}
if not self.var in self.outputs[self.namespace]:
self.outputs[self.namespace][self.var] = None
if output:
self.outputs[self.namespace][self.var] = output
return True
def _handle_var_start(self, m, p):
if not p['type'] == 'var' or not len(self.piped) < 1:
return False
self.var = p['value']
self.namespace = p['namespace']
## NOTE: Wait for threads.... this is hackish but we should have already
## validated that this variable is defined during parsing.
while 1:
try:
self.input_value = self.outputs[self.namespace][self.var]
break
except KeyError:
time.sleep(0.25)
return True
def run(self):
for count, p in enumerate(self.piping, start=1):
self.count = count
if self._handle_pipe(self.piping, p): continue
if self._handle_var_end(self.piping, p): continue
if self._handle_var_start(self.piping, p): continue
# cleanup
while len(self.pipe_threads) > 0:
thread = self.pipe_threads.pop()
thread.join()
for p in self.piped:
del(p)
class Runtime:
def __init__(self, pipes, main, pipe_timeout=10, pipe_timeout_is_error=False):
self.pipes = pipes
self.main = main
self.pipe_timeout = pipe_timeout
self.pipe_timeout_is_error = pipe_timeout_is_error
self.outputs = {}
self.piped = []
self.count = 0
self.namespace = None
self.func = None
self.execf = None
self.var = None
self.threads = []
def _error(self, enumber, msg):
print('E-RUNTIME(%d): Line %d: %s' % (enumber, self.line_number, msg))
sys.exit(1)
def _is_threaded(self, m):
if m[-1]['thread'] and m[-1]['thread'] != 'wait':
return True
return False
def _is_thread_wait(self, m):
if m[-1]['thread'] == 'wait':
return True
return False
def run(self):
for m in self.main:
if self._is_thread_wait(m):
not_done = True
for thread in self.threads:
while thread.is_alive():
time.sleep(0.25)
thread.join()
continue
if self._is_threaded(m):
t = threading.Thread(target=runner_wrap,
args=(self.pipes, self.main, m, self.outputs, self.pipe_timeout, self.pipe_timeout_is_error))
t.start()
self.threads.append(t)
else:
runner = Runner(self.pipes, self.main, m, outputs=self.outputs,
pipe_timeout=self.pipe_timeout, pipe_timeout_is_error=self.pipe_timeout_is_error)
self.outputs.update(runner.outputs)
for thread in self.threads:
thread.join()
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
  image/object/bbox/xmin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/xmax: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymin: list of floats specifying the 0+ human annotated
    bounding boxes
  image/object/bbox/ymax: list of floats specifying the 0+ human annotated
    bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around 2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
  image_data = tf.gfile.FastGFile(filename, 'rb').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index to run; it is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
  # Break all images into batches with ranges [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
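# Hedged sketch (not part of the original script): reading one record back from a
# generated shard and extracting a few of the fields documented in the module
# docstring. The shard path is a placeholder; the feature keys mirror those used
# in _convert_to_example above. This helper is never called by main().
def _read_one_example(tfrecord_path='/tmp/validation-00000-of-00128'):
  record_iterator = tf.python_io.tf_record_iterator(tfrecord_path)
  example = tf.train.Example()
  example.ParseFromString(next(iter(record_iterator)))
  feature = example.features.feature
  label = feature['image/class/label'].int64_list.value[0]
  synset = feature['image/class/synset'].bytes_list.value[0]
  encoded_jpeg = feature['image/encoded'].bytes_list.value[0]
  return label, synset, len(encoded_jpeg)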
if __name__ == '__main__':
tf.app.run()
|
test_browser.py
|
import BaseHTTPServer, multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root
from tools.shared import *
# The user can set the environment variable EMSCRIPTEN_BROWSER to force the browser test suite to
# run using a browser command line other than the default system browser.
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
cmd = shlex.split(emscripten_browser)
def run_in_other_browser(url):
Popen(cmd + [url])
if EM_BUILD_VERBOSE_LEVEL >= 3:
print >> sys.stderr, "using Emscripten browser: " + str(cmd)
webbrowser.open_new = run_in_other_browser
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum):
class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
(start, end) = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data)-1, end)
length = end-start+1
s.sendheaders([],length)
s.wfile.write(data[start:end+1])
s.wfile.close()
expectedConns = 11
httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns+1):
httpd.handle_request()
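# Hedged usage note (not from the original file): the chunked-XHR tests are expected
# to run this server in a separate process, roughly like:
#   server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server,
#                                    args=(True, 256, data, checksum))
#   server.start(); ...; server.terminate()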
class browser(BrowserCore):
@classmethod
def setUpClass(self):
super(browser, self).setUpClass()
print
print 'Running the browser tests. Make sure the browser allows popups from localhost.'
print
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL=1']) # is the default anyhow
def test_html_source_map(self):
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
cwd=self.get_dir()).communicate()
assert os.path.exists(html_file)
assert os.path.exists(html_file + '.map')
webbrowser.open_new('file://' + html_file)
time.sleep(1)
print '''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
'''
def test_emscripten_log(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
if WINDOWS and Building.which('mingw32-make'): # On Windows prefer using MinGW make if it exists, otherwise fall back to hoping we have cygwin make.
Popen(['doit.bat']).communicate()
else:
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_compression(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("hello compressed world\n");
int result = 1;
REPORT_RESULT();
return 0;
}
'''))
self.build_native_lzma()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'page.js')), 'must be side js'
assert os.path.exists(os.path.join(self.get_dir(), 'page.js.compress')), 'must be side compressed js'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size > os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be smaller'
shutil.move(os.path.join(self.get_dir(), 'page.js'), 'page.js.renamedsoitcannotbefound');
self.run_browser('page.html', '', '/report_result?1')
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
def make_main(path):
print 'make main at', path
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
for test in test_cases:
(srcpath, dstpath) = test
print 'Testing', srcpath, dstpath
make_main(dstpath)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test that '--no-heap-copy' works.
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', tricky_filename.replace('@', '@@'), '--no-heap-copy', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete(self.in_dir('assets'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print srcpath
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except OSError:
pass # directory may already exist
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.createPreloadedFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false);
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
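# First load populates the preload cache (checkPreloadResults finds nothing cached, so the page reports 1);
# on the second load the package comes from IndexedDB and the page reports 2.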
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'cdn'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
# change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT();
return 0;
}
'''))
def test():
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
shutil.move('test.data', os.path.join('cdn', 'test.data'))
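# The package is served from cdn/, which is where filePackagePrefixURL points the loader.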
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for filePackagePrefixURL
#open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http://localhost:8888/cdn/", '))
#test()
def test_compressed_file(self):
open(os.path.join(self.get_dir(), 'datafile.txt'), 'w').write('compress this please' + (2000*'.'))
open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w').write('moar' + (100*'!'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[21];
FILE *f = fopen("datafile.txt", "r");
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("file says: |%s|\n", buf);
int result = !strcmp("compress this please", buf);
FILE *f2 = fopen("datafile2.txt", "r");
fread(buf, 1, 5, f2);
buf[5] = 0;
fclose(f2);
result = result && !strcmp("moar!", buf);
printf("file 2 says: |%s|\n", buf);
REPORT_RESULT();
return 0;
}
'''))
self.build_native_lzma()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
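# --compression takes <native encoder>,<decoder .js>,<decompress function>: the packaged data (and the generated .js)
# are compressed at build time and LZMA.decompress inflates them in the browser, as the asserts below verify.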
assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'data file must exist'
assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'compressed data file must exist'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_compressed(self):
for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
(path_from_root('tests', 'screenshot.jpg'), 600)]:
self.clear()
print image
basename = os.path.basename(image)
shutil.copyfile(image, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
self.build_native_lzma()
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html',
'--preload-file', basename, '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="' + basename + '"',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')
]).communicate()
shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'], also_proxied=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O0', '-s', 'SAFE_HEAP=1'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1', '-O2', '-s', 'SAFE_HEAP=1'])
def post_manual_reftest(self, reference=None):
self.reftest(path_from_root('tests', self.reference if reference is None else reference))
html = open('test.html').read()
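# Inject a script before </body> that waits for the final requestAnimationFrame updates, runs doReftest(), and then closes the window.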
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 1000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
open('data.txt', 'w').write('datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt'], manual_reference=True, post_build=self.post_manual_reftest)
def test_glgears_proxy(self):
self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1', '-DSTATIC_GEARS=1'], manual_reference=True, post_build=self.post_manual_reftest)
# test noProxy option applied at runtime
# run normally (duplicates the test above, but verifies we can run outside of the btest harness)
self.run_browser('test.html', None, ['/report_result?0'])
# run with noProxy
self.run_browser('test.html?noProxy', None, ['/report_result?0'])
original = open('test.js').read()
def copy(to, js_mod):
open(to + '.html', 'w').write(open('test.html').read().replace('test.js', to + '.js'))
open(to + '.js', 'w').write(js_mod(open('test.js').read()))
# run with noProxy, but make main thread fail
copy('two', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WEB) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'))
self.run_browser('two.html?noProxy', None, ['/report_result?999'])
self.run_browser('two.html', None, ['/report_result?0']) # this is still cool
# run without noProxy, so proxy, but make worker fail
copy('three', lambda original: original.replace('function _main($argc,$argv) {', 'function _main($argc,$argv) { if (ENVIRONMENT_IS_WORKER) { var xhr = new XMLHttpRequest(); xhr.open("GET", "http://localhost:8888/report_result?999");xhr.send(); }'))
self.run_browser('three.html', None, ['/report_result?999'])
self.run_browser('three.html?noProxy', None, ['/report_result?0']) # this is still cool
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1']).communicate()
open('test.html', 'w').write(open(path_from_root('src', 'shell_minimal.html')).read().replace('{{{ SCRIPT }}}', '<script src="test.js"></script>'))
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
open(os.path.join(self.get_dir(), 'flag_0.js'), 'w').write('''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', reference='sdl_canvas_alpha.png', reference_slack=11)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=11)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for emterps in [
[],
['-DTEST_SLEEP', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-s', 'ASSERTIONS=1', '-s', 'EMTERPRETIFY_YIELDLIST=["_EventHandler"]', '-s', "SAFE_HEAP=1"]
]:
print delay, defines, emterps
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
%s
//Module.print('push keydown');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
//Module.print('push keyup');
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
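# When 'delay' is set, each dispatched key event is wrapped in setTimeout above, exercising the asynchronous (emterpreter sleep) path.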
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + emterps + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
open('test.html', 'w').write(html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1'], manual_reference=True, post_build=post)
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?740')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1')
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1')
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
def test_emscripten_get_now(self):
self.btest('emscripten_get_now.cpp', '1')
def test_fflush(self):
return self.skip('Skipping due to https://github.com/kripken/emscripten/issues/2770')
self.btest('test_fflush.cpp', '0', args=['-s', 'NO_EXIT_RUNTIME=1', '--shell-file', path_from_root('tests', 'test_fflush.html')])
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret)
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
secret = str(time.time())
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']'''])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(path_from_root('tests', 'idbstore.c'), str(stage), force_c=True, args=['-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', force_c=True, args=['-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', force_c=True, args=['-DSECRET=\"' + secret + '\"', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'TOTAL_MEMORY=75000000'])
def test_force_exit(self):
self.btest('force_exit.c', force_c=True, expected='17')
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify', '0', '--shell-file', path_from_root('tests', 'sdl_canvas_size.html')])
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1'],
message='You should see a blue triangle.')
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=2'])
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_worker(self):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
# run with and without preloaded file data
for file_data in [0, 1]:
print 'file data', file_data
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) , stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
if not file_data: self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""")
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename]).communicate()
chunkSize = 1024
data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data)
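# Start a helper HTTP server that serves `data` in chunkSize-sized pieces, so the checksummer worker reads it
# via chunked synchronous XHR and reports back its adler32 checksum.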
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
def test_glgears_long(self):
for proxy in [0, 1]:
print 'proxy', proxy
self.btest('hello_world_gles.c', expected=map(str, range(30, 500)), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST'] + (['--proxy-to-worker'] if proxy else []))
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print full_es2
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []),
).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print program
basename = os.path.basename(program)
args = []
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')), args=args)
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
#(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
#(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print source
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first')
open('file2.txt', 'w').write('second')
setup()
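# file_packager.py writes the loader JS to stdout; capture it in script2.js, which the test loads at runtime together with test.data.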
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1')
def test_emscripten_main_loop(self):
self.btest('emscripten_main_loop.cpp', '0')
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1')
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1')
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1')
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1')
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1')
def test_gl_textures(self):
self.btest('gl_textures.cpp', '0')
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'], also_proxied=True)
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1']) # some coverage for GL_DEBUG not breaking the build
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom(self):
self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
for opts in [0, 1]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=['-O' + str(opts), 'side.c', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1']) # multitexture
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', reference='htmltest.png')
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc_ffp_only(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1'])
def test_s3tc_crunch(self):
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'], stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1'])
def test_aniso(self):
if SPIDERMONKEY_ENGINE in JS_ENGINES:
# asm.js-ification check
Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
Settings.ASM_JS = 1
self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
print 'passed asm test'
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1'])
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png')
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png')
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_runtimelink(self):
return self.skip('BUILD_AS_SHARED_LIB=2 is deprecated')
main, supp = self.setup_runtimelink_test()
open(self.in_dir('supp.cpp'), 'w').write(supp)
Popen([PYTHON, EMCC, self.in_dir('supp.cpp'), '-o', 'supp.js', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'BUILD_AS_SHARED_LIB=2', '-O2', '-s', 'ASM_JS=0']).communicate()
shutil.move(self.in_dir('supp.js'), self.in_dir('supp.so'))
self.btest(main, args=['-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'RUNTIME_LINKED_LIBS=["supp.so"]', '-DBROWSER=1', '-O2', '-s', 'ASM_JS=0'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
Module.print('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print what, status
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
window.onerror = function() {
Module.print('fail!');
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?0');
xhr.onload = function() {
console.log('close!');
window.close();
};
xhr.send();
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
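# The next test checks runtime lifetime misuse: ccall/cwrap/direct calls issued before the runtime is initialized
# (or after it has exited) are expected to abort with an assertion, while the same calls made from myJSCallback
# during main(), or after main() when NO_EXIT_RUNTIME=1 is set, are expected to succeed.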
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
Module.print('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
Module.print('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
Module.print('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
Module.print('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
setTimeout(Module['_free'], 1000); // free is valid to call even after the runtime closes
'''
open('pre_main.js', 'w').write(r'''
Module._main = function(){
myJSCallback();
return 0;
};
''')
open('pre_runtime.js', 'w').write(r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_main.js'], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
print '\n', filename, extra_args
print 'mem init, so async, call too early'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1'] + extra_args)
print 'sync startup, call too late'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '--memory-init-file', '0'] + extra_args)
print 'sync, runtime still alive, so all good'
open(os.path.join(self.get_dir(), 'post.js'), 'w').write(post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook);
self.btest(filename, expected='606', args=['--post-js', 'post.js', '--memory-init-file', '0', '-s', 'NO_EXIT_RUNTIME=1'] + extra_args)
def test_worker_api(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]', '-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
# TODO: test only worked in non-fastcomp
def test_module(self):
return self.skip('non-fastcomp is deprecated and fails in 3.5')
Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
def test_mmap_file(self):
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
for extra_args in [[], ['--no-heap-copy']]:
self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)
def test_emrun_info(self):
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'])
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--list_browsers'])
assert 'Traceback' not in result
def test_emrun(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html']).communicate()
outdir = os.getcwd()
# We cannot run emrun from the temp directory the suite cleans up afterwards, since the browser that is launched will have that directory as its startup directory,
# and the browser does not close as part of the test, which pins down the cwd on Windows and would make it impossible to delete. Therefore switch away from that directory
# before launching.
os.chdir(path_from_root())
args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--verbose', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
if emscripten_browser is not None:
args += ['--browser', emscripten_browser]
args += [os.path.join(outdir, 'hello_world.html'), '1', '2', '--3']
process = subprocess.Popen(args)
process.communicate()
stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
assert process.returncode == 100
assert 'argc: 4' in stdout
assert 'argv[3]: --3' in stdout
assert 'hello, world!' in stdout
assert 'Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' in stdout
assert 'Testing char sequences: %20%21 ä' in stdout
assert 'hello, error stream!' in stderr
def test_uuid(self):
# Run with ./runner.py browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js'], stdout=PIPE, stderr=PIPE).communicate()
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = run_js('test.js', full_output=True)
print out
# Tidy up files that might have been created by this test.
try_delete(path_from_root('tests', 'uuid', 'test.js'))
try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
# Now run test in browser
self.btest(path_from_root('tests', 'uuid', 'test.c'), '1')
def test_glew(self):
self.btest(path_from_root('tests', 'glew.c'), expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-DGLEW_MX'], expected='1')
self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
open('pre.js', 'w').write(r'''
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
Module['addRunDependency']('test_run_dependency');
Module['removeRunDependency']('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')
def test_html5(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0')
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=opts, expected='0')
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print opts
self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=opts + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-s', 'NO_EXIT_RUNTIME=1'], expected='0')
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure', '1']]:
print opts
self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_codemods(self):
for opt_level in [0, 2]:
print 'opt level', opt_level
opts = '-O' + str(opt_level)
# sanity checks, building with and without precise float semantics generates different results
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=2']) # empty polyfill, but browser has support, so semantics are like float
# now use a shell to remove the browser's fround support
open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', '''
Math.fround = null;
var Module = {
'''))
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=1'])
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=2']) # empty polyfill, no browser support, so semantics are like double
# finally, remove fround, patch up fround as the code executes (after polyfilling etc.), to verify that we got rid of it entirely on the client side
fixer = 'python fix.py'
open('fix.py', 'w').write(r'''
import sys
filename = sys.argv[1]
js = open(filename).read()
replaced = js.replace("var Math_fround = Math.fround;", "var Math_fround = Math.fround = function(x) { return 0; }")
assert js != replaced
open(filename, 'w').write(replaced)
''')
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer]) # no fround anyhow
self.btest(path_from_root('tests', 'codemods.cpp'), expected='121378', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=1']) # a proper polyfill was installed, then replaced by the fix, so 0 is returned every time, hence a different result here
self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=2']) # we should remove the calls to the polyfill ENTIRELY here, on the clientside, so we should NOT see any calls to fround here, and result should be like double
def test_wget(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1'])
print 'asyncify+emterpreter'
self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY=1', '-s', 'EMTERPRETIFY=1'])
def test_wget_data(self):
with open(os.path.join(self.get_dir(), 'test.txt'), 'w') as f:
f.write('emscripten')
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2'])
self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O2', '-g2', '-s', 'ASSERTIONS=1'])
def test_locate_file(self):
self.clear()
open('src.cpp', 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT();
return 0;
}
'''))
open('data.txt', 'w').write('load me right before...')
open('pre.js', 'w').write('Module.locateFile = function(x) { return "sub/" + x };')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w')).communicate()
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
Popen([PYTHON, EMCC, 'src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html']).communicate()
os.mkdir('sub')
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
def test_glfw3(self):
self.btest(path_from_root('tests', 'glfw3.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3'], expected='1')
def test_glfw3_events(self):
self.btest(path_from_root('tests', 'glfw3_events.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-s', 'USE_GLFW=3'], expected='1')
def test_asm_swapping(self):
self.clear()
open('run.js', 'w').write(r'''
Module['_main'] = function() {
// test proper initial result
var result = Module._func();
console.log('first: ' + result);
if (result !== 10) throw 'bad first result';
// load second module to be swapped in
var second = document.createElement('script');
second.onload = function() { console.log('loaded second') };
second.src = 'second.js';
document.body.appendChild(second);
console.log('second appended');
Module['onAsmSwap'] = function() {
console.log('swapped');
// verify swapped-in result
var result = Module._func();
console.log('second: ' + result);
if (result !== 22) throw 'bad second result';
Module._report(999);
console.log('reported');
};
};
''')
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
print opts
open('second.cpp', 'w').write(self.with_report_result(open(path_from_root('tests', 'asm_swap2.cpp')).read()))
Popen([PYTHON, EMCC, 'second.cpp'] + opts).communicate()
Popen([PYTHON, path_from_root('tools', 'distill_asm.py'), 'a.out.js', 'second.js', 'swap-in']).communicate()
assert os.path.exists('second.js')
if isinstance(SPIDERMONKEY_ENGINE, list) and len(SPIDERMONKEY_ENGINE[0]) != 0:
out = run_js('second.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE, full_output=True, assert_returncode=None)
self.validate_asmjs(out)
else:
print 'Skipping asm validation check, spidermonkey is not configured'
self.btest(path_from_root('tests', 'asm_swap.cpp'), args=['-s', 'SWAPPABLE_ASM_MODULE=1', '-s', 'NO_EXIT_RUNTIME=1', '--pre-js', 'run.js'] + opts, expected='999')
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image_jpeg.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'
]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl2_key(self):
for defines in [[]]:
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_key.c'), '-o', 'page.html'] + defines + ['-s', 'USE_SDL=2','--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
self.run_browser('page.html', '', '/report_result?7436429')
def test_sdl2_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl2_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl2_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?712')
def test_sdl2_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl2_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2']).communicate()
self.run_browser('page.html', '', '/report_result?572')
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True) # XXX closure fails on proxy
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
def zzztest_sdl2_gfx_primitives(self):
self.btest('sdl2_gfx_primitives.c', args=['-s', 'USE_SDL=2', '-lSDL2_gfx'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl2_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_image_compressed(self):
for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
(path_from_root('tests', 'screenshot.jpg'), 600)]:
self.clear()
print image
basename = os.path.basename(image)
shutil.copyfile(image, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'sdl2_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_image.c')).read()))
self.build_native_lzma()
Popen([
PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_image.c'), '-o', 'page.html',
'--preload-file', basename, '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')
]).communicate()
shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound');
self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'])
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 1000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
open('test.html', 'w').write(html)
open('data.txt', 'w').write('datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl2_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl2_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl2_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl2_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl2_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl2_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl2_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2','--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
def test_emterpreter_async(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-g2'])
def test_emterpreter_async_2(self):
self.btest('emterpreter_async_2.cpp', '40', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O3'])
def test_emterpreter_async_virtual(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_virtual.cpp', '5', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-profiling'])
def test_emterpreter_async_virtual_2(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_virtual_2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'ASSERTIONS=1', '-s', 'SAFE_HEAP=1', '-profiling'])
def test_emterpreter_async_bad(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_bad.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_middle"]', '-s', 'ASSERTIONS=1'])
def test_emterpreter_async_mainloop(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_mainloop.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts)])
def test_emterpreter_async_with_manual(self):
for opts in [0, 1, 2, 3]:
print opts
self.btest('emterpreter_async_with_manual.cpp', '121', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-O' + str(opts), '-s', 'EMTERPRETIFY_BLACKLIST=["_acall"]'])
def test_emterpreter_async_sleep2(self):
self.btest('emterpreter_async_sleep2.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Oz'])
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os', '-s', 'ASSERTIONS=1', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'EMTERPRETIFY_YIELDLIST=["__Z14audio_callbackPvPhi", "__ZN6Beeper15generateSamplesIhEEvPT_i", "__ZN6Beeper15generateSamplesIsEEvPT_i"]', '-s', 'SAFE_HEAP=1'])
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-s', 'EMTERPRETIFY=1', '-s', 'EMTERPRETIFY_ASYNC=1', '-Os'])
def test_modularize(self):
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2']]:
for args, code in [
([], 'Module();'), # defaults
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld();
'''), # use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
var hello = HelloWorld({ noInitialRun: true, onRuntimeInitialized: function() {
setTimeout(function() { hello._main(); }); // must be async, because onRuntimeInitialized may be called synchronously, so |hello| is not yet set!
} });
'''), # pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
var hello = HelloWorld({ noInitialRun: true});
hello._main();
'''), # similar, but without a mem init file, everything is sync and simple
]:
print 'test on', opts, args, code
src = open(path_from_root('tests', 'browser_test_hello_world.c')).read()
open('test.c', 'w').write(self.with_report_result(src))
Popen([PYTHON, EMCC, 'test.c', '-s', 'MODULARIZE=1'] + args + opts).communicate()
open('a.html', 'w').write('''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
output = Popen([PYTHON, path_from_root('tools', 'webidl_binder.py'),
path_from_root('tests', 'webidl', 'test.idl'),
'glue']).communicate()[0]
assert os.path.exists('glue.cpp')
assert os.path.exists('glue.js')
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I' + path_from_root('tests', 'webidl'), '-DBROWSER'])
|
rse.py
|
# -*- coding: utf-8 -*-
# Copyright 2014-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2014-2016
# - Vincent Garonne <vincent.garonne@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
"""
Abacus-RSE is a daemon to update RSE counters.
"""
import logging
import os
import socket
import threading
import time
import traceback
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.logging import setup_logging
from rucio.common.utils import get_thread_with_periodic_running_function, daemon_sleep
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rse_counter import get_updated_rse_counters, update_rse_counter, fill_rse_counter_history_table
graceful_stop = threading.Event()
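# graceful_stop is shared by all worker threads: stop() sets it (its signal-handler-style signature suggests it is
# registered as a SIGTERM/SIGINT handler by whatever launches the daemon), and each rse_update() loop checks it so
# the threads can exit cleanly.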
def rse_update(once=False, sleep_time=10):
"""
Main loop to check and update the RSE Counters.
"""
logging.info('rse_update: starting')
logging.info('rse_update: started')
# Make an initial heartbeat so that all abacus-rse daemons have the correct worker number on the next try
executable = 'abacus-rse'
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
while not graceful_stop.is_set():
try:
# Heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
# Select a bunch of RSEs to update for this worker
start = time.time() # NOQA
rse_ids = get_updated_rse_counters(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'])
logging.debug('Index query time %f size=%d' % (time.time() - start, len(rse_ids)))
# If the list is empty, send the worker to sleep
if not rse_ids and not once:
logging.info('rse_update[%s/%s] did not get any work' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1))
daemon_sleep(start_time=start, sleep_time=sleep_time, graceful_stop=graceful_stop)
else:
for rse_id in rse_ids:
if graceful_stop.is_set():
break
start_time = time.time()
update_rse_counter(rse_id=rse_id)
logging.debug('rse_update[%s/%s]: update of rse "%s" took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rse_id, time.time() - start_time))
except Exception:
logging.error(traceback.format_exc())
if once:
break
logging.info('rse_update: graceful stop requested')
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
logging.info('rse_update: graceful stop done')
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1, fill_history_table=False, sleep_time=10):
"""
Starts up the Abacus-RSE threads.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise exception.DatabaseException('Database was not updated, daemon won\'t start')
executable = 'abacus-rse'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
logging.info('main: executing one iteration only')
rse_update(once)
else:
logging.info('main: starting threads')
threads = [threading.Thread(target=rse_update, kwargs={'once': once, 'sleep_time': sleep_time}) for i in
range(0, threads)]
if fill_history_table:
threads.append(get_thread_with_periodic_running_function(3600, fill_rse_counter_history_table, graceful_stop))
[t.start() for t in threads]
logging.info('main: waiting for interrupts')
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
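# Illustrative usage only (not part of the original module): run(once=True) performs a single counter-update pass,
# e.g. for testing, while run(threads=4, fill_history_table=True, sleep_time=60) would start four continuously
# running updater threads plus the hourly history-filling thread. In a real deployment the daemon is normally
# started via a CLI wrapper that parses these options and calls run(); that wrapper is not shown here.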
|
server.py
|
# -*- coding: utf-8 -*-
"""
DNS server framework - intended to simplify creation of custom resolvers.
Comprises the following components:
DNSServer - socketserver wrapper (in most cases you should just
need to pass this an appropriate resolver instance
and start in either foreground/background)
DNSHandler - handler instantiated by DNSServer to handle requests
The 'handle' method deals with sending/receiving
packets (handling the TCP length prefix) and delegates
the protocol handling to 'get_reply'. This decodes the
packet, hands off a DNSRecord to the Resolver instance,
and encodes the returned DNSRecord.
In most cases you don't need to change DNSHandler unless
you need to get hold of the raw protocol data in the
Resolver
DNSLogger - The class provides a default set of logging functions for
the various stages of the request handled by a DNSServer
instance which are enabled/disabled by flags in the 'log'
class variable.
Resolver - Instance implementing a 'resolve' method that receives
the decoded request packet and returns a response.
To implement a custom resolver, in most cases all you need
to do is implement this interface.
Note that there is only a single instance of the Resolver,
so you need to be careful about thread-safety and blocking
The following examples use the server framework:
fixedresolver.py - Simple resolver which will respond to all
requests with a fixed response
zoneresolver.py - Resolver which will take a standard zone
file input
shellresolver.py - Example of a dynamic resolver
proxy.py - DNS proxy
intercept.py - Intercepting DNS proxy
>>> resolver = BaseResolver()
>>> logger = DNSLogger(prefix=False)
>>> server = DNSServer(resolver,port=8053,address="localhost",logger=logger)
>>> server.start_thread()
>>> q = DNSRecord.question("abc.def")
>>> a = q.send("localhost",8053)
Request: [...] (udp) / 'abc.def.' (A)
Reply: [...] (udp) / 'abc.def.' (A) / NXDOMAIN
>>> print(DNSRecord.parse(a))
;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.def. IN A
>>> server.stop()
>>> class TestResolver:
... def resolve(self,request,handler):
... reply = request.reply()
... reply.add_answer(*RR.fromZone("abc.def. 60 A 1.2.3.4"))
... return reply
>>> resolver = TestResolver()
>>> server = DNSServer(resolver,port=8053,address="localhost",logger=logger,tcp=True)
>>> server.start_thread()
>>> a = q.send("localhost",8053,tcp=True)
Request: [...] (tcp) / 'abc.def.' (A)
Reply: [...] (tcp) / 'abc.def.' (A) / RRs: A
>>> print(DNSRecord.parse(a))
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: ...
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;abc.def. IN A
;; ANSWER SECTION:
abc.def. 60 IN A 1.2.3.4
>>> server.stop()
"""
from __future__ import print_function
import binascii,socket,struct,threading,time
try:
import socketserver
except ImportError:
import SocketServer as socketserver
from dnslib import DNSRecord,DNSError,QTYPE,RCODE,RR
class BaseResolver(object):
"""
Base resolver implementation. Provides the 'resolve' method, which is
called by DNSHandler with the decoded request (DNSRecord instance)
and returns a DNSRecord instance as the reply.
In most cases you should be able to create a custom resolver by
just replacing the resolve method with appropriate resolver code for
your application (see fixedresolver/zoneresolver/shellresolver for
examples).
Note that a single instance is used by all DNSHandler instances, so
you need to consider blocking & thread safety.
"""
def resolve(self,request,handler):
"""
Example resolver - respond to all requests with NXDOMAIN
"""
reply = request.reply()
reply.header.rcode = getattr(RCODE,'NXDOMAIN')
return reply
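# Illustrative sketch (not part of the original module), mirroring the TestResolver doctest above: a custom
# resolver only has to provide resolve(request, handler) and return a DNSRecord reply, e.g.
#
#     class ExampleFixedResolver(BaseResolver):
#         def resolve(self, request, handler):
#             reply = request.reply()
#             reply.add_answer(*RR.fromZone("abc.def. 60 A 1.2.3.4"))
#             return reply
#
# The class name and zone record here are made up for demonstration; see fixedresolver.py/zoneresolver.py for the
# real example resolvers shipped with dnslib.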
class DNSHandler(socketserver.BaseRequestHandler):
"""
Handler for socketserver. Transparently handles both TCP/UDP requests
(TCP requests have length prepended) and hands off lookup to resolver
instance specified in <SocketServer>.resolver
"""
udplen = 0 # Max udp packet length (0 = ignore)
def handle(self):
if self.server.socket_type == socket.SOCK_STREAM:
self.protocol = 'tcp'
data = self.request.recv(8192)
length = struct.unpack("!H",bytes(data[:2]))[0]
while len(data) - 2 < length:
new_data = self.request.recv(8192)
if not new_data:
break
data += new_data
data = data[2:]
else:
self.protocol = 'udp'
data,connection = self.request
self.server.logger.log_recv(self,data)
try:
rdata = self.get_reply(data)
self.server.logger.log_send(self,rdata)
if self.protocol == 'tcp':
rdata = struct.pack("!H",len(rdata)) + rdata
self.request.sendall(rdata)
else:
connection.sendto(rdata,self.client_address)
except DNSError as e:
self.server.logger.log_error(self,e)
def get_reply(self,data):
request = DNSRecord.parse(data)
self.server.logger.log_request(self,request)
resolver = self.server.resolver
reply = resolver.resolve(request,self)
self.server.logger.log_reply(self,reply)
if self.protocol == 'udp':
rdata = reply.pack()
if self.udplen and len(rdata) > self.udplen:
truncated_reply = reply.truncate()
rdata = truncated_reply.pack()
self.server.logger.log_truncated(self,truncated_reply)
else:
rdata = reply.pack()
return rdata
class DNSLogger:
"""
The class provides a default set of logging functions for the various
stages of the request handled by a DNSServer instance which are
enabled/disabled by flags in the 'log' class variable.
To customise logging create an object which implements the DNSLogger
interface and pass instance to DNSServer.
The methods which the logger instance must implement are:
log_recv - Raw packet received
log_send - Raw packet sent
log_request - DNS Request
log_reply - DNS Response
log_truncated - Truncated
log_error - Decoding error
log_data - Dump full request/response
"""
def __init__(self,log="",prefix=True):
"""
Selectively enable log hooks depending on log argument
(comma separated list of hooks to enable/disable)
- If empty enable default log hooks
- If entry starts with '+' (eg. +send,+recv) enable hook
- If entry starts with '-' (eg. -data) disable hook
- If entry doesn't start with +/- replace defaults
Prefix argument enables/disables log prefix
"""
default = ["request","reply","truncated","error"]
log = log.split(",") if log else []
enabled = set([ s for s in log if s[0] not in '+-'] or default)
[ enabled.add(l[1:]) for l in log if l.startswith('+') ]
[ enabled.discard(l[1:]) for l in log if l.startswith('-') ]
for l in ['log_recv','log_send','log_request','log_reply',
'log_truncated','log_error','log_data']:
if l[4:] not in enabled:
setattr(self,l,self.log_pass)
self.prefix = prefix
def log_pass(self,*args):
pass
def log_prefix(self,handler):
if self.prefix:
return "%s [%s:%s] " % (time.strftime("%Y-%m-%d %X"),
handler.__class__.__name__,
handler.server.resolver.__class__.__name__)
else:
return ""
def log_recv(self,handler,data):
print("%sReceived: [%s:%d] (%s) <%d> : %s" % (
self.log_prefix(handler),
handler.client_address[0],
handler.client_address[1],
handler.protocol,
len(data),
binascii.hexlify(data)))
def log_send(self,handler,data):
print("%sSent: [%s:%d] (%s) <%d> : %s" % (
self.log_prefix(handler),
handler.client_address[0],
handler.client_address[1],
handler.protocol,
len(data),
binascii.hexlify(data)))
def log_request(self,handler,request):
print("%sRequest: [%s:%d] (%s) / '%s' (%s)" % (
self.log_prefix(handler),
handler.client_address[0],
handler.client_address[1],
handler.protocol,
request.q.qname,
QTYPE[request.q.qtype]))
self.log_data(request)
def log_reply(self,handler,reply):
if reply.header.rcode == RCODE.NOERROR:
print("%sReply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (
self.log_prefix(handler),
handler.client_address[0],
handler.client_address[1],
handler.protocol,
reply.q.qname,
QTYPE[reply.q.qtype],
",".join([QTYPE[a.rtype] for a in reply.rr])))
else:
print("%sReply: [%s:%d] (%s) / '%s' (%s) / %s" % (
self.log_prefix(handler),
handler.client_address[0],
handler.client_address[1],
handler.protocol,
reply.q.qname,
QTYPE[reply.q.qtype],
RCODE[reply.header.rcode]))
self.log_data(reply)
def log_truncated(self,handler,reply):
print("%sTruncated Reply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (
self.log_prefix(handler),
handler.client_address[0],
handler.client_address[1],
handler.protocol,
reply.q.qname,
QTYPE[reply.q.qtype],
",".join([QTYPE[a.rtype] for a in reply.rr])))
self.log_data(reply)
def log_error(self,handler,e):
print("%sInvalid Request: [%s:%d] (%s) :: %s" % (
self.log_prefix(handler),
handler.client_address[0],
handler.client_address[1],
handler.protocol,
e))
def log_data(self,dnsobj):
print("\n",dnsobj.toZone(" "),"\n",sep="")
class UDPServer(socketserver.ThreadingMixIn,socketserver.UDPServer):
allow_reuse_address = True
class TCPServer(socketserver.ThreadingMixIn,socketserver.TCPServer):
allow_reuse_address = True
class DNSServer(object):
"""
Convenience wrapper for a socketserver instance allowing
either a UDP or TCP server to be started in blocking mode
or as a background thread.
Processing is delegated to a custom resolver (instance) and
optionally a custom logger (instance), handler (class), and
server (class).
In most cases only a custom resolver instance is required
(and possibly a logger)
"""
def __init__(self,resolver,
address="",
port=53,
tcp=False,
logger=None,
handler=DNSHandler,
server=None):
"""
resolver: resolver instance
address: listen address (default: "")
port: listen port (default: 53)
tcp: UDP (false) / TCP (true) (default: False)
logger: logger instance (default: DNSLogger)
handler: handler class (default: DNSHandler)
server: socketserver class (default: UDPServer/TCPServer)
"""
if not server:
if tcp:
server = TCPServer
else:
server = UDPServer
self.server = server((address,port),handler)
self.server.resolver = resolver
self.server.logger = logger or DNSLogger()
def start(self):
self.server.serve_forever()
def start_thread(self):
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.server.shutdown()
def isAlive(self):
return self.thread.isAlive()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
|
judge.py
|
import epicbox
import threading
from django.conf import settings
epicbox.configure(profiles=[
epicbox.Profile("python", "0xecho/python3.8.12:latest")
])
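# epicbox runs each submission inside the Docker image registered above. GLOBAL_LIMITS caps a run at 5 minutes of
# CPU time and a 512 MB memory limit (epicbox takes the memory limit in megabytes).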
GLOBAL_LIMITS = {"cputime": 5*60, "memory": 512}
def judge(submission):
t = threading.Thread(target=judge_worker, args=(submission,))
t.start()
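# judge_worker does the actual grading in the background thread started above: it bundles the submitted file with
# runner.py and gen.py (paths taken from Django settings), executes runner.py in the sandbox, then eval()s the
# sandbox stdout and copies MOVES, INDEXES and SCORE onto the submission. The stdout is therefore trusted to be a
# well-formed Python literal.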
def judge_worker(submission):
file_name = submission.code_file.name
file_content = open(file_name, "rb").read()
file_name = file_name.split("/")[-1]
if not file_name.endswith(".py"):
file_name += ".py"
files = [
{"name": file_name, "content": file_content},
{"name": "runner.py", "content": open(settings.RUNNER_FILE_PATH, "rb").read()},
{"name": "gen.py", "content": open(settings.GAME_FILE_PATH, "rb").read()},
]
result = epicbox.run("python", f"python3 runner.py gen.py {file_name} {submission.seed}", files=files, limits=GLOBAL_LIMITS)
submission.errors = str(result)
submission.save()
output = result.get("stdout").decode()
output = eval(output)
moves = output.get("MOVES")
indexes = output.get("INDEXES")
score = output.get("SCORE")
submission.moves_history = ",".join(moves)
submission.indexes_state = indexes
submission.score = float(score.strip())
submission.save()
|
main.py
|
import cv2
from configs.settings import Settings
import darknet
from threading import Thread
import time, pylaxz
from queue import Queue
"""
Configs = {debug}, {nodetect}, {hostcamera}, {gui}
"""
class Application:
def __init__(self) -> None:
self.load_camera() if hostcamera else self.load_hub()
if not nodetect: self.load_detector()
def load_camera(self):
self.cap = cv2.VideoCapture(0)
self.cap.set(3,640)
self.cap.set(4,480)
time.sleep(3.0)
def load_hub(self):
import imagezmq
self.cap = imagezmq.ImageHub()
def reply(self):
self.cap.send_reply(b"OK")
def get_image(self):
if hostcamera:
_ , img = self.cap.read()
img = cv2.resize(img, (self.network_width, self.network_height), interpolation=cv2.INTER_LINEAR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
else:
img = self.cap.recv_image()
return img
def load_detector(self):
self.detector = darknet.LoadNetwork(networks['cfg'], networks['data'],
networks['weight'], float(networks['thresh']), int(networks['batch_size']) )
self.network_width = self.detector.network_w
self.network_height = self.detector.network_h
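# Pipeline overview: get_img() feeds camera/hub frames into q_frame, get_dk_img() keeps a freshly allocated darknet
# image in q_dk_frame, and detect() takes one item from each queue per iteration and runs the detector. Both queues
# are single-slot (maxsize=1), so at most one frame is buffered between producer and consumer.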
def detect():
if debug : pylaxz.printf('detect thread started.' ,_int=1)
while app.run:
detections = app.detector.detect_image(q_dk_frame.get(), q_frame.get())
try:
if detections: pylaxz.printf(detections, _int=True) #TODO another thread or process
except KeyboardInterrupt:
app.run = False
if hostcamera : app.cap.release()
if debug : pylaxz.printf('detect thread stopped.' ,_int=1)
def get_dk_img():
if debug : pylaxz.printf('get_dk_img thread started.' ,_int=1)
while app.run:
if q_dk_frame.empty():
q_dk_frame.put(darknet.make_image(app.network_width, app.network_height,3))
if not app.run:
with q_dk_frame.mutex:
q_dk_frame.queue.clear()
if debug : pylaxz.printf('get_dk_img thread stopped.' ,_int=1)
break
def get_img():
if debug : pylaxz.printf('get_img thread started.' ,_int=1)
while app.run:
if q_frame.empty(): q_frame.put(app.get_image())
if not hostcamera : app.reply()
if not app.run:
with q_frame.mutex:
q_frame.queue.clear()  # Queue has no clear(); clear the underlying deque, as get_dk_img() does
if debug : pylaxz.printf('get_img thread stopped.' ,_int=1)
break
if __name__ == "__main__":
s = Settings()
networks = s.get_network
debug, nodetect, hostcamera, gui = s.appconfigs
app = Application()
app.run = True
q_frame = Queue(maxsize=1)
q_dk_frame = Queue(maxsize=1)
t_get_frame = Thread(target=get_img, args=())
t_get_dk_frame = Thread(target=get_dk_img, args=())
t_detect_plate = Thread(target=detect, args=())
t_get_frame.start()
t_get_dk_frame.start()
t_detect_plate.start()
pylaxz.printf('Network is Loaded.\nThree threads started.', _int=1)
|
cheekymonkey.py
|
#!/usr/bin/env python3
import timeit
import os
import arcade
# from arcade.examples.frametime_plotter import FrametimePlotter
# from arcade import FrametimePlotter
from pyglet.gl import GL_NEAREST, GL_LINEAR
import pymunk
import logging
import math
import time
import threading
import argparse
from signal import signal, SIGINT
from create_level import create_level_1
from physics_utility import (
PymunkSprite,
check_grounding,
resync_physics_sprites,
)
from player_utility import Player
from constants import *
import constants
from k8s_kill_pod import *
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
def exit_handler(signal_received, frame):
# TODO: Handle cleanup here
logging.info('SIGINT or CTRL-C caught. Exiting game.')
exit(0)
class PhysicsSprite(arcade.Sprite):
def __init__(self, pymunk_shape, filename):
super().__init__(filename, center_x=pymunk_shape.body.position.x, center_y=pymunk_shape.body.position.y)
self.pymunk_shape = pymunk_shape
self.HITCOUNT = 0
class CircleSprite(PhysicsSprite):
def __init__(self, pymunk_shape, filename):
super().__init__(pymunk_shape, filename)
self.width = pymunk_shape.radius * 2
self.height = pymunk_shape.radius * 2
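# PhysicsSprite ties an arcade sprite to a pymunk shape (the sprite is created at the body's position and carries a
# hit counter), and CircleSprite additionally sizes itself from the shape's radius. The pymunk space itself is set up
# in MyGame.setup() below.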
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
super().__init__(width, height, title)
# arcade.set_background_color((0, 196, 231)) # lighter background
arcade.set_background_color((0, 101, 186)) # darker background
# Used for dragging shapes around with the mouse
self.shape_being_dragged = None
self.last_mouse_position = 0, 0
# Draw and processing timings
self.draw_time = 0
self.processing_time = 0
# FPS Counter
self.last_time = None
self.frame_count = 0
self.fps_message = None
# Holds the status message for pods killed
self.LAST_POD_KILLED = None
def on_close(self):
exit_handler(SIGINT, 0)
def setup(self):
""" Set up the game and initialize the variables. """
# -- Pymunk
self.space = pymunk.Space()
self.space.gravity = GRAVITY
# Physics joint used for grabbing items
self.grab_joint = None
# Lists of sprites
# self.dynamic_sprite_list = arcade.SpriteList[PymunkSprite]()
self.dynamic_sprite_list = arcade.SpriteList() # I don't know why, but this had to change from the line above
self.static_sprite_list = arcade.SpriteList()
self.static_sprite_list.is_static = True
self.bg_sprite_list = arcade.SpriteList()
self.bg_sprite_list.is_static = True
self.fg_sprite_list = arcade.SpriteList()
self.fg_sprite_list.is_static = True
self.ball_sprite_list: arcade.SpriteList[PhysicsSprite] = arcade.SpriteList()
# Current force applied to the player for movement by keyboard
self.force = (0, 0)
# Set the viewport boundaries
# These numbers set where we have 'scrolled' to.
self.view_left = 0
self.view_bottom = 0
# Track the current state of what key is pressed
self.left_pressed = False
self.right_pressed = False
self.up_pressed = False
self.down_pressed = False
self.is_jumping = False
# Holds game state
self.game_over = False
# Array for storing emitters
self.emitters = []
# self.frametime_plotter = FrametimePlotter()
# pyglet.clock.schedule_once(self.emitter, 1)
# Build the level
create_level_1(self.space, self.static_sprite_list, self.dynamic_sprite_list, self.bg_sprite_list, self.fg_sprite_list)
# Set up the player
x = 50
y = int((SCREEN_HEIGHT / 2))
# self.player = Player("./images/tiles/grassMid.png", x, y, scale=0.5, moment=pymunk.inf, mass=1)
self.player = Player("./images/Char_Monkey_Free_Images/Animations/monkey_idle.png", x, y, scale=0.5, moment=pymunk.inf, mass=1)
# self.player.center_x = SCREEN_WIDTH / 2
# self.player.center_y = SCREEN_HEIGHT / 2
self.dynamic_sprite_list.append(self.player)
self.space.add(self.player.body, self.player.shape)
# logging.info("Number of dynamic sprites created: %s", len(self.dynamic_sprite_list))
# Load sounds
self.jump_sound = arcade.load_sound("./sounds/jump3.wav")
self.punch_sound = arcade.load_sound("./sounds/woodhit.wav")
self.explode_sound = arcade.load_sound("./sounds/432668__dxeyes__crate-break-4.wav")
self.ball_sound = arcade.load_sound("./sounds/laser4.wav")
self.game_over_sound = arcade.load_sound("./sounds/277441__xtrgamr__tones-of-victory.wav")
self.game_over_sound_did_play = False
# self.intro_sound = arcade.load_sound("./sounds/277441__xtrgamr__tones-of-victory.wav")
# self.intro_sound_did_play = False
def on_draw(self):
""" Render the screen. """
self.frame_count += 1
# This command has to happen before we start drawing
arcade.start_render()
# for sprite in self.dynamic_sprite_list: # Draw hitboxes for debugging
# sprite.draw_hit_box(arcade.color.RED, 3)
# for sprite in self.static_sprite_list:
# sprite.draw_hit_box(arcade.color.BLUE, 3)
# print("Number of dynamic sprites present:", len(self.dynamic_sprite_list))
# Start timing how long rendering takes
draw_start_time = timeit.default_timer()
# Draw all the sprites
self.bg_sprite_list.draw(filter=GL_NEAREST)
self.static_sprite_list.draw(filter=GL_NEAREST)
self.dynamic_sprite_list.draw(filter=GL_NEAREST)
self.ball_sprite_list.draw()
self.fg_sprite_list.draw(filter=GL_NEAREST)
# Testing shapes
# arcade.draw_polygon_filled(( (80, 60), (80, 0), (20, 0)), arcade.color.RED)
# arcade.draw_polygon_filled(( (20, 120), (20, 60), (80, 60)), arcade.color.RED)
# Draw emitters
for e in self.emitters:
e.draw()
# print(e.get_count())
# Display game over screen when needed
if self.game_over:
arcade.draw_text("Game Over", self.view_left + SCREEN_WIDTH / 4, self.view_bottom + SCREEN_HEIGHT / 2, arcade.color.BLACK, 100, font_name = ["Impact", "Courier", "Helvetica"])
# Once per split second
if self.frame_count % 20 == 0:
self.player.punching = False # Unset the punching animation
# Display FPS
if self.last_time and self.frame_count % 60 == 0:
fps = 1.0 / (time.time() - self.last_time) * 60
self.fps_message = f"FPS: {fps:5.0f}"
if self.fps_message:
arcade.draw_text(self.fps_message, self.view_left + 10, self.view_bottom + 60, arcade.color.BLACK, 14)
if self.frame_count % 60 == 0:
self.last_time = time.time()
# Display timings
# output = f"Processing time: {self.processing_time:.3f}"
# arcade.draw_text(output, 20 + self.view_left, SCREEN_HEIGHT - 60 + self.view_bottom, arcade.color.WHITE, 12)
# output = f"Drawing time: {self.draw_time:.3f}"
# arcade.draw_text(output, 20 + self.view_left, SCREEN_HEIGHT - 80 + self.view_bottom, arcade.color.WHITE, 12)
# Display instructions
# output = "Use the mouse to move boxes, space to punch, hold G to grab an item to the right."
# arcade.draw_text(output, 20 + self.view_left, SCREEN_HEIGHT - 40 + self.view_bottom, arcade.color.WHITE, 12)
# Display last pod killed
if self.LAST_POD_KILLED:
output = f"Last pod killed: {self.LAST_POD_KILLED}"
arcade.draw_text(output, 20 + self.view_left, SCREEN_HEIGHT - 20 + self.view_bottom, arcade.color.WHITE, 12, font_name = ["Helvetica"])
self.draw_time = timeit.default_timer() - draw_start_time
def on_mouse_press(self, x, y, button, modifiers):
""" Handle mouse down events """
if button == arcade.MOUSE_BUTTON_LEFT:
# Store where the mouse is clicked. Adjust accordingly if we've
# scrolled the viewport.
self.last_mouse_position = (x + self.view_left, y + self.view_bottom)
# See if we clicked on any physics object
shape_list = self.space.point_query(self.last_mouse_position, 1, pymunk.ShapeFilter())
# If we did, remember what we clicked on
if len(shape_list) > 0:
self.shape_being_dragged = shape_list[0]
elif button == arcade.MOUSE_BUTTON_RIGHT:
# With right mouse button, shoot a heavy coin fast.
mass = 30
radius = 10
inertia = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
body = pymunk.Body(mass, inertia)
body.position = (x + self.view_left, y + self.view_bottom)
body.velocity = 2000, 0
shape = pymunk.Circle(body, radius, pymunk.Vec2d(0, 0))
shape.friction = 0.3
arcade.play_sound(self.ball_sound)
self.space.add(body, shape)
sprite = CircleSprite(shape, "./images/items/coinGold.png")
self.ball_sprite_list.append(sprite)
def on_mouse_release(self, x, y, button, modifiers):
""" Handle mouse up events """
if button == arcade.MOUSE_BUTTON_LEFT:
# Release the item we are holding (if any)
self.shape_being_dragged = None
def on_mouse_motion(self, x, y, dx, dy):
""" Handle mouse motion events """
if self.shape_being_dragged is not None:
# If we are holding an object, move it with the mouse
self.last_mouse_position = (x + self.view_left, y + self.view_bottom)
self.shape_being_dragged.shape.body.position = self.last_mouse_position
self.shape_being_dragged.shape.body.velocity = dx * 20, dy * 20
def scroll_viewport(self):
""" Manage scrolling of the viewport. """
# Flipped to true if we need to scroll
changed = False
# Scroll left
# if self.player.position[0] > -constants.WORLD_SIZE + VIEWPORT_MARGIN: # Only scroll left if not near edge of world
left_bndry = self.view_left + VIEWPORT_MARGIN
if self.player.left < left_bndry:
self.view_left -= left_bndry - self.player.left
changed = True
# Scroll right
# if self.player.position[0] < constants.WORLD_SIZE - VIEWPORT_MARGIN: # Only scroll right if not near edge of world
right_bndry = self.view_left + SCREEN_WIDTH - VIEWPORT_MARGIN
if self.player.right > right_bndry:
self.view_left += self.player.right - right_bndry
changed = True
# Scroll up
top_bndry = self.view_bottom + SCREEN_HEIGHT - VIEWPORT_MARGIN
if self.player.top > top_bndry:
self.view_bottom += self.player.top - top_bndry
changed = True
# Scroll down
bottom_bndry = self.view_bottom + VIEWPORT_MARGIN
if self.player.bottom < bottom_bndry:
self.view_bottom -= bottom_bndry - self.player.bottom
changed = True
if changed:
arcade.set_viewport(int(self.view_left),
int(SCREEN_WIDTH + self.view_left),
int(self.view_bottom),
int(SCREEN_HEIGHT + self.view_bottom))
# print(arcade.get_viewport())
def on_update(self, delta_time):
""" Update the sprites """
# Keep track of how long this function takes.
start_time = timeit.default_timer()
# print(math.fmod(self.frame_count, delta_time))
# print(self.frame_count)
# See if all crates have been destroyed, if so that's game over
if len(self.dynamic_sprite_list) - 1 <= 0:
# logging.info("Game Over!")
self.game_over = True
if self.game_over_sound_did_play:
self.player.texture = self.player.textures[TEXTURE_IDLE_2]
return
else:
arcade.play_sound(self.game_over_sound)
self.game_over_sound_did_play = True # Only play sound once
return # Exit early / stop updating physics after game over
# print(self.player.body.position)
# # Print key states for debugging
# logging.info("Left: %s", self.left_pressed)
# logging.info("Right: %s", self.right_pressed)
# logging.info("Up: %s", self.up_pressed)
# print(self.force, self.player.shape.friction)
# print(self.player.body.velocity[0])
# # Debug grounding
# grounding = check_grounding(self.player) # find out if player is standing on ground
# if grounding['body'] is not None:
# logging.info("Grounding: %s", grounding['normal'].x / grounding['normal'].y)
# logging.info("Player friction: %s", self.player.shape.friction)
# See if the player is standing on an item.
# If she is, apply opposite force to the item below her.
# So if she moves left, the box below her will have
# a force to move to the right.
grounding = check_grounding(self.player)
if self.force[0] and grounding and grounding['body']:
grounding['body'].apply_force_at_world_point((-self.force[0], 0), grounding['position'])
# Apply force to monkey if direction keys pressed
if self.up_pressed:
grounding = check_grounding(self.player) # find out if player is standing on ground
if grounding['body'] is not None and abs(
grounding['normal'].x / grounding['normal'].y) <= self.player.shape.friction and self.player.body.velocity[1] < 1:
# She is! Go ahead and jump
self.player.body.apply_impulse_at_local_point((0, PLAYER_JUMP_IMPULSE))
arcade.play_sound(self.jump_sound)
# else:
# # print("Not on ground, cant jump")
# pass
elif self.down_pressed and not self.up_pressed:
# logging.info("Pressed down, not currently doing anything")
pass
if self.left_pressed and not self.right_pressed:
if self.player.body.velocity[0] >= 100: # If player already running right, apply the brakes so we can switch directions faster
self.force = (-4500, 0)
self.player.shape.friction = PLAYER_FRICTION * 15
else: # Add force to the player, and set the player friction to zero
self.force = (-PLAYER_MOVE_FORCE, 0)
self.player.shape.friction = 0
elif self.right_pressed and not self.left_pressed:
if self.player.body.velocity[0] <= -100: # If player already running left, apply the brakes so we can switch directions faster
self.force = (4500, 0)
self.player.shape.friction = PLAYER_FRICTION * 15
else: # Add force to the player, and set the player friction to zero
self.force = (PLAYER_MOVE_FORCE, 0)
self.player.shape.friction = 0
if not self.right_pressed and not self.left_pressed and not self.up_pressed:
#If no directions pressed, stop player
self.force = (0, 0)
self.player.shape.friction = PLAYER_FRICTION * 15 # Greatly increase friction so player stops instead of sliding
# Enforce player speed limit
if self.player.body.velocity[0] >= PLAYER_SPEED_LIMIT:
self.force = (-500, 0)
if self.player.body.velocity[0] <= -PLAYER_SPEED_LIMIT:
self.force = (500, 0)
# If we have force to apply to the player (from hitting the arrow
# keys), apply it.
self.player.body.apply_force_at_local_point(self.force, (0, 0))
# Update player sprites
self.player.update(self.frame_count) # Pass in frame_count so we can decide which frame of animation to use
# Check sprites
for sprite in self.dynamic_sprite_list:
if sprite.shape.body.position.y < 0: # Check for sprites that fall off the screen.
# Remove sprites from physics space
self.space.remove(sprite.shape, sprite.shape.body)
# Remove sprites from physics list
sprite.remove_from_sprite_lists()
if sprite.shape.name == "Pymunk" and sprite.shape.HITCOUNT >= CONTAINER_HEALTH / 2: # Change texture of crate if 50% damaged
broken_texture = arcade.load_texture("./images/tiles/boxCrate_single.png")
sprite.texture = broken_texture
# print("Damanged crate")
# if sprite.shape.name:
# print(sprite.shape.name)
if sprite.shape.name == "Pymunk" and sprite.shape.HITCOUNT >= CONTAINER_HEALTH: # Destroy container if hit CONTAINER_HEALTH times
# logging.info("Destroying shape %s", sprite.shape)
self.emitters.append(explosion(sprite.shape.body.position[0], sprite.shape.body.position[1])) # Make an explosion
self.space.remove(sprite.body, sprite.shape)
sprite.remove_from_sprite_lists()
# print(len(self.space.shapes))
arcade.play_sound(self.explode_sound)
# Kill random pod!
delete_thread = threading.Thread(target=self.kill_pod)
delete_thread.start()
# Check if we need to teleport
self.check_teleport()
# Update emitters
emitters_to_update = self.emitters.copy()
for e in emitters_to_update:
e.update()
# remove emitters that can be reaped
to_del = [e for e in emitters_to_update if e.can_reap()]
for e in to_del:
self.emitters.remove(e)
# Check for balls that fall off the screen
for sprite in self.ball_sprite_list:
if sprite.pymunk_shape.body.position.y < 0:
# Remove balls from physics space
self.space.remove(sprite.pymunk_shape, sprite.pymunk_shape.body)
# Remove balls from physics list
sprite.remove_from_sprite_lists()
# Move ball sprites to where physics objects are
for sprite in self.ball_sprite_list:
sprite.center_x = sprite.pymunk_shape.body.position.x
sprite.center_y = sprite.pymunk_shape.body.position.y
sprite.angle = math.degrees(sprite.pymunk_shape.body.angle)
# Update physics
# Use a constant time step, don't use delta_time
# http://www.pymunk.org/en/latest/overview.html#game-loop-moving-time-forward
self.space.step(1 / 60.0)
# If we are dragging an object, make sure it stays with the mouse. Otherwise
# gravity will drag it down.
if self.shape_being_dragged is not None:
self.shape_being_dragged.shape.body.position = self.last_mouse_position
self.shape_being_dragged.shape.body.velocity = 0, 0
# Resync the sprites to the physics objects that shadow them
resync_physics_sprites(self.dynamic_sprite_list)
# Scroll the viewport if needed
self.scroll_viewport()
# Save the time it took to do this.
self.processing_time = timeit.default_timer() - start_time
def check_teleport(self):
''' See if we need to warp back to start of level '''
# print(self.player.position)
if self.player.position[0] > constants.WORLD_SIZE: # Need to teleport player
self.player.body.position = pymunk.Vec2d(-constants.WORLD_SIZE, self.player.body.position[1])
if self.player.position[0] < -constants.WORLD_SIZE: # Need to teleport player
self.player.body.position = pymunk.Vec2d(constants.WORLD_SIZE, self.player.body.position[1])
# Check if crates need warping
for sprite in self.dynamic_sprite_list:
if sprite.shape.name == "Pymunk" and sprite.body.position[0] > constants.WORLD_SIZE:
sprite.body.position = pymunk.Vec2d(-constants.WORLD_SIZE, self.player.body.position[1])
# print("Sprite out of bounds")
if sprite.shape.name == "Pymunk" and sprite.body.position[0] < -constants.WORLD_SIZE:
sprite.body.position = pymunk.Vec2d(constants.WORLD_SIZE, self.player.body.position[1])
# print("Sprite out of bounds")
if sprite.shape.name == "Pymunk" and sprite.body.position[1] < 0:
# print("sprite fell off world, removing")
self.space.remove(sprite.body, sprite.shape)
sprite.remove_from_sprite_lists()
def kill_pod(self):
''' Deletes pod on kubernetes, then removes crate sprite from game '''
if constants.OFFLINE_MODE == False:
P1, P2 = list_pods()
self.LAST_POD_KILLED = delete_pod(P1, P2)
def punch(self):
''' Punch a crate '''
        # --- Punch right
        # See if we have a physics object to our right
self.player.punching = True
check_point = (self.player.right + 40, self.player.center_y)
shape_list = self.space.point_query(check_point, 1, pymunk.ShapeFilter())
        # Apply force to any object to our right
for shape in shape_list:
# print(shape.shape.name)
arcade.play_sound(self.punch_sound)
shape.shape.body.apply_impulse_at_world_point((constants.PLAYER_PUNCH_IMPULSE, constants.PLAYER_PUNCH_IMPULSE),
check_point)
# Hit counter
shape.shape.HITCOUNT += 1
# logging.info("Punched shape R%s x %s", shape.shape, shape.shape.HITCOUNT)
        # --- Punch left
# See if we have a physics object to our left
check_point = (self.player.left - 40, self.player.center_y)
shape_list = self.space.point_query(check_point, 1, pymunk.ShapeFilter())
        # Apply force to any object to our left
for shape in shape_list:
arcade.play_sound(self.punch_sound)
shape.shape.body.apply_impulse_at_world_point((-constants.PLAYER_PUNCH_IMPULSE, constants.PLAYER_PUNCH_IMPULSE),
check_point)
# Hit counter
shape.shape.HITCOUNT += 1
# logging.info("Punched shape L%s x %s", shape.shape, shape.shape.HITCOUNT)
def grab(self):
""" Grab something """
# See if we have a physics object to our right
check_point = (self.player.right + 40, self.player.center_y)
shape_list = self.space.point_query(check_point, 1, pymunk.ShapeFilter())
# Create a joint for an item to our right
for shape in shape_list:
self.grab_joint = pymunk.PinJoint(self.player.shape.body, shape.shape.body)
self.space.add(self.grab_joint)
def let_go(self):
""" Let go of whatever we are holding """
if self.grab_joint:
self.space.remove(self.grab_joint)
self.grab_joint = None
def on_key_press(self, symbol: int, modifiers: int):
""" Handle keyboard presses. """
if symbol == arcade.key.UP:
self.up_pressed = True
elif symbol == arcade.key.DOWN:
self.down_pressed = True
elif symbol == arcade.key.LEFT:
self.left_pressed = True
elif symbol == arcade.key.RIGHT:
self.right_pressed = True
elif symbol == arcade.key.SPACE:
self.punch()
elif symbol == arcade.key.G:
self.grab()
elif symbol == arcade.key.R:
logging.info("Resetting game")
self.setup()
elif symbol == arcade.key.ESCAPE:
logging.info("Closing game")
self.close()
elif symbol == arcade.key.PLUS or symbol == arcade.key.EQUAL:
constants.PLAYER_PUNCH_IMPULSE = constants.PLAYER_PUNCH_IMPULSE * 1.1
logging.info("Increased punch force: %s", constants.PLAYER_PUNCH_IMPULSE)
elif symbol == arcade.key.MINUS:
constants.PLAYER_PUNCH_IMPULSE = constants.PLAYER_PUNCH_IMPULSE / 1.1
logging.info("Decreasing punch force: %s", constants.PLAYER_PUNCH_IMPULSE)
def on_key_release(self, symbol: int, modifiers: int):
""" Handle keyboard releases. """
if symbol == arcade.key.UP:
self.up_pressed = False
elif symbol == arcade.key.DOWN:
self.down_pressed = False
elif symbol == arcade.key.LEFT:
self.left_pressed = False
elif symbol == arcade.key.RIGHT:
self.right_pressed = False
# if symbol == arcade.key.RIGHT:
# # Remove force from the player, and set the player friction to a high number so she stops
# self.force = (0, 0)
# self.player.shape.friction = 15
# elif symbol == arcade.key.LEFT:
# # Remove force from the player, and set the player friction to a high number so she stops
# self.force = (0, 0)
# self.player.shape.friction = 15
elif symbol == arcade.key.G:
self.let_go()
def explosion(x, y):
""" Create an explosion when crate destroyed """
e = arcade.Emitter(
center_xy=(x, y),
emit_controller=arcade.EmitterIntervalWithTime(constants.DEFAULT_EMIT_INTERVAL * 1.25, constants.DEFAULT_EMIT_DURATION / 6),
particle_factory=lambda emitter: arcade.LifetimeParticle(
filename_or_texture=constants.E_TEXTURE,
change_xy=arcade.rand_in_circle((0.0, 0.0), constants.PARTICLE_SPEED_FAST * 6),
lifetime=random.uniform(0.05, 1),
scale=random.uniform(0.05, 0.3),
alpha=random.uniform(64, 250),
change_angle=random.uniform(-30, 30),
angle=random.uniform(0, 360)
)
)
return e
def main():
# Process arguments
parser = argparse.ArgumentParser(description='A Chaos Monkey for your Kubernetes cluster!')
parser.add_argument("--offline", default="no", help="Set to yes to enable offline mode")
parser.add_argument('-e','--exclude', nargs='*', default="", help='<Optional> Space-separated list of namespaces to NOT target')
args = parser.parse_args()
offline = args.offline
if offline != "no":
logging.info("Starting in offline mode")
constants.OFFLINE_MODE = True
else:
logging.info("Starting in online mode")
constants.OFFLINE_MODE = False
constants.EXCLUDES_LIST = args.exclude
logging.info("Excluding namespaces: %s", constants.EXCLUDES_LIST)
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
signal(SIGINT, exit_handler)
main()
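# Hedged usage sketch (not part of the original script): assuming this file is named
# cheekymonkey.py and k8s_kill_pod can reach your cluster, the argparse flags defined in
# main() above would typically be used like this:
#
#     python3 cheekymonkey.py                             # online mode, all namespaces fair game
#     python3 cheekymonkey.py --offline yes               # play the game without killing any pods
#     python3 cheekymonkey.py -e kube-system monitoring   # never target the listed namespaces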
|
tree_index_builder.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.distributed.fleet.proto import index_dataset_pb2
import numpy as np
import struct
import argparse
import os
import time
import collections
import multiprocessing as mp
from sklearn.cluster import KMeans
class TreeIndexBuilder:
def __init__(self):
self.branch = 2
self.timeout = 5
def build_by_category(self, input_filename, output_filename):
class Item:
def __init__(self, item_id, cat_id):
self.item_id = item_id
self.cat_id = cat_id
self.code = 0
def __lt__(self, other):
return self.cat_id < other.cat_id or \
(self.cat_id == other.cat_id and
self.item_id < other.item_id)
items = []
item_id_set = set()
with open(input_filename, 'r') as f:
for line in f:
iterobj = line.split()
item_id = int(iterobj[0])
cat_id = int(iterobj[1])
if item_id not in item_id_set:
items.append(Item(item_id, cat_id))
item_id_set.add(item_id)
del item_id_set
items.sort()
def gen_code(start, end, code):
if end <= start:
return
if end == start + 1:
items[start].code = code
return
num = int((end - start) / self.branch)
remain = int((end - start) % self.branch)
for i in range(self.branch):
_sub_end = start + (i + 1) * num
if (remain > 0):
remain -= 1
_sub_end += 1
_sub_end = min(_sub_end, end)
gen_code(start, _sub_end, self.branch * code + self.branch - i)
start = _sub_end
gen_code(0, len(items), 0)
ids = np.array([item.item_id for item in items])
codes = np.array([item.code for item in items])
self.build(output_filename, ids, codes)
def tree_init_by_kmeans(self, input_filename, output_filename, parall=1):
t1 = time.time()
ids = list()
data = list()
with open(input_filename) as f:
for line in f:
arr = line.split(',')
if not arr:
break
ids.append(int(arr[0]))
vector = list()
for i in range(1, len(arr)):
vector.append(float(arr[i]))
data.append(vector)
self.ids = np.array(ids)
self.data = np.array(data)
t2 = time.time()
print("Read data done, {} records read, elapsed: {}".format(
len(ids), t2 - t1))
queue = mp.Queue()
queue.put((0, np.array(range(len(self.ids)))))
processes = []
pipes = []
for _ in range(parall):
a, b = mp.Pipe()
p = mp.Process(target=self._train, args=(b, queue))
processes.append(p)
pipes.append(a)
p.start()
self.codes = np.zeros((len(self.ids), ), dtype=np.int64)
for pipe in pipes:
codes = pipe.recv()
for i in range(len(codes)):
if codes[i] > 0:
self.codes[i] = codes[i]
for p in processes:
p.join()
assert (queue.empty())
self.build(output_filename, self.ids, self.codes, data=self.data)
def _train(self, pipe, queue):
last_size = -1
catch_time = 0
processed = False
code = np.zeros((len(self.ids), ), dtype=np.int64)
while True:
for _ in range(5):
try:
pcode, index = queue.get(timeout=self.timeout)
except:
index = None
if index is not None:
break
if index is None:
if processed and (last_size <= 1024 or catch_time >= 3):
print("Process {} exits".format(os.getpid()))
break
else:
print("Got empty job, pid: {}, time: {}".format(os.getpid(
), catch_time))
catch_time += 1
continue
processed = True
catch_time = 0
last_size = len(index)
if last_size <= 1024:
self._minbatch(pcode, index, code)
else:
tstart = time.time()
left_index, right_index = self._cluster(index)
if last_size > 1024:
print("Train iteration done, pcode:{}, "
"data size: {}, elapsed time: {}"
.format(pcode, len(index), time.time() - tstart))
self.timeout = int(0.4 * self.timeout + 0.6 * (time.time() -
tstart))
if self.timeout < 5:
self.timeout = 5
if len(left_index) > 1:
queue.put((2 * pcode + 1, left_index))
if len(right_index) > 1:
queue.put((2 * pcode + 2, right_index))
process_count = 0
for c in code:
if c > 0:
process_count += 1
print("Process {} process {} items".format(os.getpid(), process_count))
pipe.send(code)
def _minbatch(self, pcode, index, code):
dq = collections.deque()
dq.append((pcode, index))
batch_size = len(index)
tstart = time.time()
while dq:
pcode, index = dq.popleft()
if len(index) == 2:
code[index[0]] = 2 * pcode + 1
code[index[1]] = 2 * pcode + 2
continue
left_index, right_index = self._cluster(index)
if len(left_index) > 1:
dq.append((2 * pcode + 1, left_index))
elif len(left_index) == 1:
code[left_index] = 2 * pcode + 1
if len(right_index) > 1:
dq.append((2 * pcode + 2, right_index))
elif len(right_index) == 1:
code[right_index] = 2 * pcode + 2
print("Minbatch, batch size: {}, elapsed: {}".format(
batch_size, time.time() - tstart))
def _cluster(self, index):
data = self.data[index]
kmeans = KMeans(n_clusters=2, random_state=0).fit(data)
labels = kmeans.labels_
l_i = np.where(labels == 0)[0]
r_i = np.where(labels == 1)[0]
left_index = index[l_i]
right_index = index[r_i]
if len(right_index) - len(left_index) > 1:
distances = kmeans.transform(data[r_i])
left_index, right_index = self._rebalance(left_index, right_index,
distances[:, 1])
elif len(left_index) - len(right_index) > 1:
distances = kmeans.transform(data[l_i])
left_index, right_index = self._rebalance(right_index, left_index,
distances[:, 0])
return left_index, right_index
def _rebalance(self, lindex, rindex, distances):
sorted_index = rindex[np.argsort(distances)[::-1]]
idx = np.concatenate((lindex, sorted_index))
mid = int(len(idx) / 2)
return idx[mid:], idx[:mid]
def build(self, output_filename, ids, codes, data=None, id_offset=None):
# process id offset
if not id_offset:
max_id = 0
for id in ids:
if id > max_id:
max_id = id
id_offset = max_id + 1
# sort by codes
argindex = np.argsort(codes)
codes = codes[argindex]
ids = ids[argindex]
# Trick, make all leaf nodes to be in same level
min_code = 0
max_code = codes[-1]
while max_code > 0:
min_code = min_code * self.branch + 1
max_code = int((max_code - 1) / self.branch)
for i in range(len(codes)):
while codes[i] < min_code:
codes[i] = codes[i] * self.branch + 1
filter_set = set()
max_level = 0
tree_meta = index_dataset_pb2.TreeMeta()
with open(output_filename, 'wb') as f:
for id, code in zip(ids, codes):
node = index_dataset_pb2.IndexNode()
node.id = id
node.is_leaf = True
node.probability = 1.0
kv_item = index_dataset_pb2.KVItem()
kv_item.key = self._make_key(code)
kv_item.value = node.SerializeToString()
self._write_kv(f, kv_item.SerializeToString())
ancessors = self._ancessors(code)
if len(ancessors) + 1 > max_level:
max_level = len(ancessors) + 1
for ancessor in ancessors:
if ancessor not in filter_set:
node = index_dataset_pb2.IndexNode()
node.id = id_offset + ancessor # id = id_offset + code
node.is_leaf = False
node.probability = 1.0
kv_item = index_dataset_pb2.KVItem()
kv_item.key = self._make_key(ancessor)
kv_item.value = node.SerializeToString()
self._write_kv(f, kv_item.SerializeToString())
filter_set.add(ancessor)
tree_meta.branch = self.branch
tree_meta.height = max_level
kv_item = index_dataset_pb2.KVItem()
kv_item.key = '.tree_meta'.encode('utf-8')
kv_item.value = tree_meta.SerializeToString()
self._write_kv(f, kv_item.SerializeToString())
def _ancessors(self, code):
ancs = []
while code > 0:
code = int((code - 1) / self.branch)
ancs.append(code)
return ancs
def _make_key(self, code):
return str(code).encode('utf-8')
def _write_kv(self, fwr, message):
fwr.write(struct.pack('i', len(message)))
fwr.write(message)
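# Illustrative note (not part of the original module): with the default binary tree
# (self.branch == 2), _train/_minbatch assign a node's children the codes 2*pcode + 1 and
# 2*pcode + 2, and _ancessors walks back up with (code - 1) // branch. For example, a leaf
# with code 6 has ancestors [2, 0]:
#
#     6 -> (6 - 1) // 2 = 2 -> (2 - 1) // 2 = 0
#
# which is why build() can reconstruct every internal node from the leaf codes alone.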
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="TreeIndexBuiler")
parser.add_argument(
"--parallel",
required=False,
type=int,
default=12,
help="parallel nums.")
parser.add_argument(
"--mode",
required=True,
choices=['by_category', 'by_kmeans'],
help="mode")
parser.add_argument("--input", required=True, help="input filename")
parser.add_argument("--output", required=True, help="output filename")
args = parser.parse_args()
if args.mode == "by_category":
builder = TreeIndexBuilder()
builder.build_by_category(args.input, args.output)
elif args.mode == "by_kmeans":
builder = TreeIndexBuilder()
builder.tree_init_by_kmeans(args.input, args.output, args.parallel)
|
led.py
|
"""
by Denexapp
"""
import threading
import time
import RPi.GPIO as GPIO
class led():
def __init__(self, pin, period, invert=False):
self.stop = True
self.on = 0
self.off = 1
if invert:
self.on = 1
self.off = 0
GPIO.setmode(GPIO.BCM)
self.pin = pin
self.period = period
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, self.off)
def start_blink(self):
if self.stop:
thread = threading.Thread(target=self.__start_blink_action)
thread.daemon = True
thread.start()
def stop_blink(self):
GPIO.output(self.pin, self.off)
self.stop = True
def __start_blink_action(self):
self.stop = False
if self.period != 0:
while True:
GPIO.output(self.pin, self.on)
time.sleep(self.period/1000)
if self.stop:
break
GPIO.output(self.pin, self.off)
time.sleep(self.period/1000)
if self.stop:
break
else:
GPIO.output(self.pin, self.on)
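# Minimal usage sketch, not part of the original file: GPIO pin 18 and the 500 ms period
# are arbitrary example values, and this assumes it runs on a Raspberry Pi with RPi.GPIO
# available.
if __name__ == "__main__":
    status_led = led(pin=18, period=500)  # 500 ms on / 500 ms off
    status_led.start_blink()
    time.sleep(5)  # let it blink for a few seconds
    status_led.stop_blink()
    GPIO.cleanup()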
|
multiprocessing_env.py
|
# -*- coding: utf-8 -*-
"""
# Author : Camey
# DateTime : 2022/4/19 10:25 PM
# Description :
"""
# This code comes from OpenAI Baselines and is used to run environments in multiple worker processes
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
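# Hedged usage sketch, not part of the original file: it assumes the classic `gym` package
# (4-tuple step API) is installed and that "CartPole-v1" exists; any other env id works the
# same way.
if __name__ == "__main__":
    def make_env():
        import gym  # imported inside the thunk so each worker process imports its own copy
        return gym.make("CartPole-v1")
    envs = SubprocVecEnv([make_env for _ in range(4)])  # one subprocess per thunk
    obs = envs.reset()  # stacked observations, shape (4, obs_dim)
    actions = [envs.action_space.sample() for _ in range(len(envs))]
    obs, rewards, dones, infos = envs.step(actions)
    envs.close()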
|
handler.py
|
import io
import json
import logging
import socket
import struct
import threading
import traceback
import weakref
import paramiko
import tornado.gen
import tornado.web
import tornado.websocket
from tornado.ioloop import IOLoop
from tornado.util import basestring_type
from webssh.worker import Worker, recycle_worker, workers
try:
from concurrent.futures import Future
except ImportError:
from tornado.concurrent import Future
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
DELAY = 3
class MixinHandler(object):
def get_real_client_addr(self):
ip = self.request.headers.get('X-Real-Ip')
port = self.request.headers.get('X-Real-Port')
if ip is None and port is None:
return
try:
port = int(port)
except (TypeError, ValueError):
pass
else:
if ip: # does not validate ip and port here
return (ip, port)
logging.warning('Bad nginx configuration.')
return False
class IndexHandler(MixinHandler, tornado.web.RequestHandler):
def initialize(self, loop, policy, host_keys_settings):
self.loop = loop
self.policy = policy
self.host_keys_settings = host_keys_settings
def get_privatekey(self):
try:
data = self.request.files.get('privatekey')[0]['body']
except TypeError:
return
return data.decode('utf-8')
@classmethod
def get_specific_pkey(cls, pkeycls, privatekey, password):
logging.info('Trying {}'.format(pkeycls.__name__))
try:
pkey = pkeycls.from_private_key(io.StringIO(privatekey),
password=password)
except paramiko.PasswordRequiredException:
raise ValueError('Need password to decrypt the private key.')
except paramiko.SSHException:
pass
else:
return pkey
@classmethod
def get_pkey_obj(cls, privatekey, password):
password = password.encode('utf-8') if password else None
pkey = cls.get_specific_pkey(paramiko.RSAKey, privatekey, password)\
or cls.get_specific_pkey(paramiko.DSSKey, privatekey, password)\
or cls.get_specific_pkey(paramiko.ECDSAKey, privatekey, password)\
or cls.get_specific_pkey(paramiko.Ed25519Key, privatekey,
password)
if not pkey:
raise ValueError('Not a valid private key file or '
'wrong password for decrypting the private key.')
return pkey
def get_port(self):
value = self.get_value('port')
try:
port = int(value)
except ValueError:
port = 0
if 0 < port < 65536:
return port
raise ValueError('Invalid port {}'.format(value))
def get_value(self, name):
value = self.get_argument(name)
if not value:
raise ValueError('Empty {}'.format(name))
return value
def get_args(self):
hostname = self.get_value('hostname')
port = self.get_port()
username = self.get_value('username')
password = self.get_argument('password')
privatekey = self.get_privatekey()
pkey = self.get_pkey_obj(privatekey, password) if privatekey else None
args = (hostname, port, username, password, pkey)
logging.debug(args)
return args
def get_client_addr(self):
return self.get_real_client_addr() or self.request.connection.stream.\
socket.getpeername()
def ssh_connect(self):
ssh = paramiko.SSHClient()
ssh._system_host_keys = self.host_keys_settings['system_host_keys']
ssh._host_keys = self.host_keys_settings['host_keys']
ssh._host_keys_filename = self.host_keys_settings['host_keys_filename']
ssh.set_missing_host_key_policy(self.policy)
args = self.get_args()
dst_addr = (args[0], args[1])
logging.info('Connecting to {}:{}'.format(*dst_addr))
try:
ssh.connect(*args, timeout=6)
except socket.error:
raise ValueError('Unable to connect to {}:{}'.format(*dst_addr))
except paramiko.BadAuthenticationType:
raise ValueError('SSH authentication failed.')
except paramiko.BadHostKeyException:
raise ValueError('Bad host key.')
chan = ssh.invoke_shell(term='xterm')
chan.setblocking(0)
worker = Worker(self.loop, ssh, chan, dst_addr)
worker.src_addr = self.get_client_addr()
return worker
def ssh_connect_wrapped(self, future):
try:
worker = self.ssh_connect()
except Exception as exc:
logging.error(traceback.format_exc())
future.set_exception(exc)
else:
future.set_result(worker)
def get(self):
self.render('index.html')
@tornado.gen.coroutine
def post(self):
worker_id = None
status = None
future = Future()
t = threading.Thread(target=self.ssh_connect_wrapped, args=(future,))
t.setDaemon(True)
t.start()
try:
worker = yield future
except Exception as exc:
status = str(exc)
else:
worker_id = worker.id
workers[worker_id] = worker
self.loop.call_later(DELAY, recycle_worker, worker)
self.write(dict(id=worker_id, status=status))
class WsockHandler(MixinHandler, tornado.websocket.WebSocketHandler):
def initialize(self, loop):
self.loop = loop
self.worker_ref = None
def get_client_addr(self):
return self.get_real_client_addr() or self.stream.socket.getpeername()
def open(self):
self.src_addr = self.get_client_addr()
logging.info('Connected from {}:{}'.format(*self.src_addr))
worker = workers.get(self.get_argument('id'))
if worker and worker.src_addr[0] == self.src_addr[0]:
workers.pop(worker.id)
self.set_nodelay(True)
worker.set_handler(self)
self.worker_ref = weakref.ref(worker)
self.loop.add_handler(worker.fd, worker, IOLoop.READ)
else:
self.close(reason='Websocket authentication failed.')
def on_message(self, message):
logging.debug('{!r} from {}:{}'.format(message, *self.src_addr))
worker = self.worker_ref()
try:
msg = json.loads(message)
except JSONDecodeError:
return
if not isinstance(msg, dict):
return
resize = msg.get('resize')
if resize:
try:
worker.chan.resize_pty(*resize)
except (TypeError, struct.error, paramiko.SSHException):
pass
data = msg.get('data')
if data and isinstance(data, basestring_type):
worker.data_to_dst.append(data)
worker.on_write()
def on_close(self):
logging.info('Disconnected from {}:{}'.format(*self.src_addr))
worker = self.worker_ref() if self.worker_ref else None
if worker:
if self.close_reason is None:
self.close_reason = 'client disconnected'
worker.close(reason=self.close_reason)
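# Hedged sketch of the client-side flow implied by the handlers above (not part of the
# original module; host, port and route are placeholders): the browser first POSTs the SSH
# form fields (hostname, port, username, password, optional privatekey) to IndexHandler,
# which replies with {"id": <worker_id>, "status": null} once ssh_connect() succeeds; the
# page then opens a WebSocket such as ws://localhost:8888/ws?id=<worker_id>, and
# WsockHandler.open() uses that id to look up the worker and start streaming terminal data.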
|
test_fork_locking.py
|
import logging
import os
import time
import threading
import unittest
import _fork_locking
#import _dummy_fork_locking as _fork_locking
class ForkLockingHelpers(object):
LockActionsCounter = 0
ForkActionsCounter = 0
LockActionsGuard = threading.Lock()
@classmethod
def LockActionsThreadProc(cls):
lock = threading.Lock()
for _ in range(1000):
_fork_locking.acquire_lock()
try:
with lock:
time.sleep(0.001)
logging.debug("lock action done")
with cls.LockActionsGuard:
cls.LockActionsCounter += 1
finally:
_fork_locking.release_lock()
time.sleep(0.001)
@classmethod
def ForkActionsThreadProc(cls):
for _ in range(1000):
_fork_locking.acquire_fork()
try:
pid = os.fork()
if pid == 0:
with cls.LockActionsGuard:
logging.debug("LockActionsCounter value is %d", cls.LockActionsCounter)
os._exit(0)
assert pid > 0
os.waitpid(pid, 0)
cls.ForkActionsCounter += 1
finally:
_fork_locking.release_fork()
time.sleep(0.001)
class TestForkLocking(unittest.TestCase):
def setUp(self):
logging.basicConfig(level=logging.INFO)
def testForkLocking(self):
allThreads = [threading.Thread(target=ForkLockingHelpers.LockActionsThreadProc) for _ in range(10)]
allThreads += [threading.Thread(target=ForkLockingHelpers.ForkActionsThreadProc)]
stTime = time.time()
for th in allThreads:
th.start()
for th in allThreads:
th.join()
eTime = time.time()
logging.info("testForkLocking time %.4f", eTime - stTime)
self.assertEqual(ForkLockingHelpers.LockActionsCounter, 10000)
self.assertEqual(ForkLockingHelpers.ForkActionsCounter, 1000)
if __name__ == "__main__":
unittest.main()
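# Hedged follow-up sketch (not part of the original test): the acquire/release pairs used
# above can be wrapped as context managers so callers cannot forget the matching release.
# This relies only on the four _fork_locking functions already exercised by the test.
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def fork_safe_lock():
#         _fork_locking.acquire_lock()
#         try:
#             yield
#         finally:
#             _fork_locking.release_lock()
#
#     @contextmanager
#     def fork_section():
#         _fork_locking.acquire_fork()
#         try:
#             yield
#         finally:
#             _fork_locking.release_fork()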
|
event.py
|
# coding=utf8
import time
import datetime
import json
import uuid
import sys
import threading
import zmq
import os
import websocket
try:
    from importlib import reload  # Python 3: reload() is no longer a builtin
except ImportError:
    pass  # Python 2: reload is a builtin
from log import get_logger
from load import mod_load
from dog import WATCH_DIR
sys.path.append(WATCH_DIR)
class StrackEvent(websocket.WebSocketApp):
def __init__(self, host, interval=30):
super(StrackEvent, self).__init__(host, on_message=self.on_message, on_open=self.on_open, on_close=self.on_close)
self.ping_interval = interval
self.logger = get_logger()
self.action_list = {}
self.mod_list = {}
self.zmq_context = zmq.Context().socket(zmq.PUSH)
self.zmq_context.bind("tcp://127.0.0.1:5000")
self.zmq_threading = threading.Thread(target=self.start_zmq)
self.zmq_threading.start()
def add_action(self, name):
if not name.startswith("__") and name.endswith(".py"):
name = os.path.basename(name)
mod, func = mod_load(name)
self.action_list[name] = func
self.mod_list[name] = mod
def reload_action(self, name):
if not name.startswith("__") and name.endswith(".py"):
name = os.path.basename(name)
print(self.mod_list.keys())
reload(self.mod_list[name])
def rm_action(self, name):
if not name.startswith("__") and name.endswith(".py"):
name = os.path.basename(name)
try:
del self.action_list[name]
del self.mod_list[name]
except:
pass
def load_action(self):
for f in os.listdir(WATCH_DIR):
if not f.startswith("__") and f.endswith(".py"):
mod, func = mod_load(f)
self.action_list[f] = func
self.mod_list[f] = mod
def on_message(self, message):
# json_msg = json.loads(message)
# print("22222222222222222")
# print(json_msg)
# print(self.action_list)
# if json_msg['type'] == "built_in":
self.zmq_context.send(message)
# map(lambda f: f(json_msg), self.action_list.values())
def on_error(self, error):
self.logger.error(str(error))
def on_close(self):
self.logger.info(datetime.datetime.now())
self.logger.info("################################ ws closed ###############################")
def on_open(self):
uid = str(uuid.uuid4())
bind_data = {'method': 'bind', 'data': {'uid': uid, "group": "eventlog"}}
print(json.dumps(bind_data))
self.send(json.dumps(bind_data))
self.logger.info(datetime.datetime.now())
self.logger.info("################################ ws opened ###############################")
self.logger.info("###############uuid %s ################" % uid)
def start(self):
self.load_action()
self.run_forever(ping_interval=self.ping_interval, ping_timeout=10)
def start_zmq(self):
context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.connect("tcp://127.0.0.1:5000")
while True:
message = socket.recv()
json_msg = json.loads(message)
            for f in self.action_list.values():
                f(self.logger, json_msg)  # map() is lazy on Python 3, so call each action explicitly
time.sleep(1)
if __name__ == "__main__":
websocket.enableTrace(True)
if len(sys.argv) < 2:
host = "ws://192.168.31.108:9083?sign=7dc70a53a6cc0fff5b02c47c070c471f"
else:
host = sys.argv[1]
ws = StrackEvent(host)
ws.start()
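# Hedged sketch (not part of this file): judging from how actions are called in start_zmq()
# -- f(self.logger, json_msg) -- a plugin dropped into WATCH_DIR should expose a callable
# that mod_load() can discover, roughly like the following; the file name, function name
# and event fields are illustrative only.
#
#     # WATCH_DIR/log_events.py
#     def handle(logger, event):
#         """Receive one decoded event dict from the websocket stream."""
#         logger.info("event received: %s", event.get("type"))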
|
01_.py
|
"""
Part 1
"""
import threading
print(threading.active_count())  # Returns the number of active threads
print(threading.enumerate())  # Returns a list of the active threads
"""
Part 2
"""
import threading
print(threading.enumerate()[0].name)  # Name of the thread
print(threading.enumerate()[0].is_alive())  # State of the thread
"""
Part 3
Creating a Thread
"""
import threading
from time import sleep
def wait():
sleep(2)
t = threading.Thread(target=wait, name='wait')
t.start()
print(threading.enumerate()[1].name)
print(threading.enumerate()[1].is_alive())
"""
Part 4
"""
import threading
from time import sleep
def wait():
count = 0
while True:
print(count)
count += 1
sleep(0.1)
t = threading.Thread(target=wait, name='wait')
t.start()
print(threading.enumerate()[1].name)
print(threading.enumerate()[1].is_alive())
"""
Part 5
"""
import threading
from time import sleep
def wait():
sleep(2)
t = threading.Thread(target=wait, name='wait', daemon=True)
t.start()
print(threading.enumerate()[1].name)
print(threading.enumerate()[1].is_alive())
t.join()
"""
Part 6
"""
import threading
from time import sleep
def wait():
sleep(2)
    print('finished')
t1 = threading.Thread(target=wait, name='wait', daemon=True)
t1.start()
print(threading.enumerate()[1].name)
print(threading.enumerate()[1].is_alive())
t2 = threading.Thread(target=wait, name='wait')
t2.start()
print(threading.enumerate()[2].name)
print(threading.enumerate()[2].is_alive())
"""
Part 7
"""
import threading
from time import sleep
def wait():
sleep(2)
    print('finished')
class myThread(threading.Thread):
def __init__(self, target, name='MyThread'):
super().__init__()
self.target = target
self.name = name
def run(self):
self.target()
t = myThread(wait, 'wait')
t.start()
print(threading.enumerate()[1].name)
print(threading.enumerate()[1].is_alive())
|
decorator.py
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
import threading
import time
import os
import sys
import platform
import functools
def chdir(dir_path=''):
"""自动调用os.chdir
:param dir_path:
:return:
"""
def _chdir(func):
@functools.wraps(func)
def __chdir(*args, **kwargs):
os.chdir(dir_path)
return func(*args, **kwargs)
return __chdir
return _chdir
def retry(times=5):
"""一个装饰器,可以设置报错重试次数
:param times: 最多重试次数
:return:
"""
def _retry(func):
@functools.wraps(func)
def __retry(*args, **kwargs):
retry_times = 0
while retry_times <= times:
try:
res = func(*args, **kwargs)
return res
except Exception:
print(sys.exc_info()[1])
retry_times += 1
if retry_times <= times:
                        print('Retrying in 1 second (attempt {0})'.format(retry_times))
time.sleep(1)
else:
                        print('Max retries reached, giving up')
import traceback
traceback.print_exc()
return None
return __retry
return _retry
def count_running_time(func):
"""装饰器函数,统计函数的运行耗时
:param func:
:return:
"""
@functools.wraps(func)
def _count_running_time(*args, **kwargs):
start = time.time()
res = func(*args, **kwargs)
print(('cost time :{:.3f}'.format(time.time() - start)))
return res
return _count_running_time
def auto_next(func):
"""可以给协程用,自动next一次
:param func:
:return:
"""
@functools.wraps(func)
def _auto_next(*args, **kwargs):
g = func(*args, **kwargs)
next(g)
return g
return _auto_next
def check_adb(func):
@functools.wraps(func)
def _check_adb(*args, **kwargs):
@cache_result()
def get_adb_devices():
from util.common import run_cmd
return run_cmd('adb devices')
result = get_adb_devices()
if (len(result)) < 2:
            print('No phone is currently connected via adb')
return None
return func(*args, **kwargs)
return _check_adb
def cache_result(times=60):
def _wrap(func):
@functools.wraps(func)
def __wrap(*args, **kwargs):
if hasattr(func, "__last_call_result__") and time.time() - func.__last_call_time__ < times:
print(func.__last_call_result__)
return func.__last_call_result__
else:
result = func(*args, **kwargs)
func.__last_call_result__ = result
func.__last_call_time__ = time.time()
return result
return __wrap
return _wrap
def windows(obj):
"""如果非windows系统,抛出异常"""
if not platform.platform().startswith('Windows'):
raise Exception("仅支持在Windows下使用")
return obj
class Singleton:
"""单例模式的一种其实,其实Python最佳的单例方式还是通过模块来实现
用法如下:
@Singleton
class YourClass(object):
"""
def __init__(self, cls):
self.__instance = None
self.__cls = cls
self._lock = threading.Lock()
def __call__(self, *args, **kwargs):
        with self._lock:  # context manager form so the lock is released even if the constructor raises
            if self.__instance is None:
                self.__instance = self.__cls(*args, **kwargs)
return self.__instance
def simple_background_task(func):
@functools.wraps(func)
def _wrap(*args, **kwargs):
threading.Thread(target=func, args=args, kwargs=kwargs).start()
return
return _wrap
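# Minimal usage sketch, not part of the original module; the function below and its failure
# behaviour are invented purely to demonstrate the decorators defined above.
if __name__ == '__main__':
    @count_running_time
    @retry(times=2)
    def flaky_division(x):
        return 10 / x  # raises ZeroDivisionError when x == 0, which triggers the retry loop
    flaky_division(5)  # succeeds on the first attempt
    flaky_division(0)  # retried twice, then gives up and returns None
    @Singleton
    class Config(object):
        pass
    assert Config() is Config()  # every call returns the same instance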
|
lock_management.py
|
import threading
shared_resource_with_lock = 0
shared_resource_with_no_lock = 0
COUNT = 100000
shared_resource_lock = threading.Lock()
####LOCK MANAGEMENT##
def increment_with_lock():
global shared_resource_with_lock
for i in range(COUNT):
shared_resource_lock.acquire()
shared_resource_with_lock += 1
shared_resource_lock.release()
def decrement_with_lock():
global shared_resource_with_lock
for i in range(COUNT):
shared_resource_lock.acquire()
shared_resource_with_lock -= 1
shared_resource_lock.release()
####NO LOCK MANAGEMENT ##
def increment_without_lock():
global shared_resource_with_no_lock
for i in range(COUNT):
shared_resource_with_no_lock += 1
def decrement_without_lock():
global shared_resource_with_no_lock
for i in range(COUNT):
shared_resource_with_no_lock -= 1
####the Main program
if __name__ == "__main__":
t1 = threading.Thread(target = increment_with_lock)
t2 = threading.Thread(target = decrement_with_lock)
t3 = threading.Thread(target = increment_without_lock)
t4 = threading.Thread(target = decrement_without_lock)
t1.start()
t2.start()
t3.start()
t4.start()
t1.join()
t2.join()
t3.join()
t4.join()
print ("the value of shared variable with lock management is %s"\
%shared_resource_with_lock)
print ("the value of shared variable with race condition is %s"\
%shared_resource_with_no_lock)
"""
With a lock, concurrent threads do not touch the shared variable at the same time: each thread waits for the other's operation to finish before starting its own operation on that variable.
Without a lock, concurrent threads may access the shared variable simultaneously, so values can be assigned incorrectly because of the race on the shared memory.
Note that since the variables were declared in the scope of the main thread, they live in the process's memory and are therefore shared by all the threads started from it.
A Lock object can be in one of two states: locked or unlocked.
- If the state is unlocked, a call to acquire() changes the state to locked.
- If the state is locked, a call to acquire() blocks the calling thread until another thread calls release().
- If the state is unlocked, a call to release() raises a RuntimeError.
- If the state is locked, a call to release() changes the state back to unlocked.
----
Drawbacks:
1. Locks are easy to misuse and can cause deadlocks.
2. Locks bring several other downsides for the application as a whole.
3. Locks limit scalability and hurt code readability.
4. Locking can conflict with the need to enforce access priorities on memory shared by several processes.
5. In an application that uses locks, it is considerably harder to pinpoint the exact source of an error.
"""
|
node.py
|
# Importing socket library in python
import socket
import sys
import time
import threading
import logging
import re
from abc import ABC, abstractmethod
"""
Abstract class with functions to create & handle a node.
"""
class Node(ABC):
def __init__(self):
"""
Must be called by every derived class in the beginning as
super.__init__()
"""
self.HOST = '127.0.0.1' # Constant host IP address
self.LOG_SERVER_PORT = 31418 # Constant port number of log server
self.MSG_FORMAT = 'utf-8' # Constant message format
self.shutdown = False # Flag to indicate node shutdown
def setupNode(self, portNo):
"""
Sets the initial values of the attributes. Must be called by every derived class in __init__
Parameters
----------
portNo : int
Port number on which the node listens
"""
self.portNo = portNo # port number
self.sock = [] # Socket object [serverSocket, clientSocket]
self.clientFlag = True
self.createSocket()
if (self.bindSocket(2)):
# Creating a thread to listen to requests
self.listener = threading.Thread(target=self.listenRqsts)
self.listener.daemon = True
self.listener.start()
else:
self.close()
def createSocket(self):
'''
create socket (self)
'''
try:
self.sock.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
self.sock.append(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
except socket.error as msg:
logging.error("Socket creation error: " + str(msg))
def bindSocket(self, listenNo):
'''
Bind socket (self, listenNo)
listenNo ---> Number of connections it will accept
Returns
-------
success : bool
            True if the socket is successfully bound to the given port number; else False
'''
try:
logging.debug("[PORT BINDED] Binding the Port: " + str(self.portNo))
self.sock[0].bind((self.HOST, self.portNo))
self.sock[0].listen(listenNo)
return True
except socket.error as msg:
logging.error("[ERROR] Socket binding error: " + str(msg))
logging.info("Cannot bind to port number: " + str(self.portNo) + " | Exiting node...")
return False
def listenRqsts(self):
'''
Accept connection from other nodes (self)
Makes 5 attempts to check and accept a connection
'''
allConn = []
allAddr = []
while not self.shutdown:
try:
del allConn[:]
del allAddr[:]
conn, address = self.sock[0].accept()
self.sock[0].setblocking(1) # prevents timeout
allConn.append(conn)
allAddr.append(address)
logging.debug("[NEW CONNECTION] Connection has been established to :" + address[0])
for i in range(len(allConn)):
data = allConn[i].recv(1024)
if len(data) > 0:
logging.debug("[NEW MESSAGE] Message received from Node-" + str(allAddr[i]) + " : " + str(data)[2:-1])
self.processRqst(str(data)[2:-1])
except KeyboardInterrupt:
logging.error("[ERROR] accepting connections. Trying again...")
except socket.error as msg:
if not bool(re.search(".WinError 10038.", str(msg))):
logging.error("[ERROR] Cannot accept any connections: " + str(msg))
self.close()
self.sock[0].close()
logging.debug("Socket closed")
def send(self, msg, port, waitReply=False):
'''
Connect to a node and send message. (Low level function)
Parameters
----------
msg : str
Message to send
port : int
Port number to which message must be sent
waitReply : bool
To wait or not to wait for the reply. Default: False
Returns
-------
success : bool
True if message was sent successfully; else False
'''
try:
if not self.clientFlag:
self.sock[1] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.clientFlag = True
self.sock[1].connect((self.HOST, port))
self.sock[1].send(msg.encode(self.MSG_FORMAT))
if waitReply:
print(self.sock[1].recv(1024).decode(self.MSG_FORMAT))
return True
except KeyboardInterrupt:
logging.error("[ERROR] Keyboard interrupt detected")
return False
except socket.error as msg:
logging.error("[ERROR] Cannot send message to the target node: " + str(port) + str(msg))
if (port == self.LOG_SERVER_PORT):
logging.fatal("Log server has not been instantiated. Exiting node ...")
self.close()
return False
finally:
self.sock[1].close()
self.clientFlag = False
def close(self):
'''
Closes all the sockets
'''
self.shutdown = True
self.sock[0].close()
if self.clientFlag:
self.sock[1].close()
self.clientFlag = False
@abstractmethod
def processRqst(self, msg):
"""
Processes the request messages obtained by the node. Should only be called from within
listenRqsts function. Must be defined by each of the child class.
Parameters
----------
msg : str
Message string received by the node.
"""
pass
@abstractmethod
def sendMsg(self, msg, nodeId):
"""
Sends message to a nodeID and to LogServer
Parameters
----------
msg : str
nodeID : str
"""
pass
@abstractmethod
def run(self):
"""
Define a while loop that executes till node.shutdown == False.
"""
pass
@abstractmethod
def startNewNetwork(self, nodePortNo):
"""
Start a new P2P network with user defined node and listens to nodePortNo.
Parameters
----------
nodePortNo : int
"""
pass
@abstractmethod
def joinNetwork(self, existingPortNo, nodePortNo):
"""
Join an existing P2P network through a node on the network.
Parameters
----------
existingPortNo : int
Port number on which the existing node is listening
nodePortNo : int
Port number on which the node is listening
"""
pass
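# Hedged sketch (not part of the original module): a minimal concrete subclass showing how
# the abstract hooks above are meant to be filled in. Treating nodeId as a port number and
# the echo behaviour are illustrative assumptions only.
class EchoNode(Node):
    def __init__(self):
        super().__init__()
    def processRqst(self, msg):
        logging.info("[ECHO] %s", msg)  # simply log whatever request arrives
    def sendMsg(self, msg, nodeId):
        self.send(msg, int(nodeId))  # assumption: nodeId is the peer's port number
        self.send(msg, self.LOG_SERVER_PORT)  # mirror the message to the log server
    def run(self):
        while not self.shutdown:
            time.sleep(1)  # the listener thread accepts and processes requests
    def startNewNetwork(self, nodePortNo):
        self.setupNode(nodePortNo)
    def joinNetwork(self, existingPortNo, nodePortNo):
        self.setupNode(nodePortNo)
        self.send("JOIN " + str(self.portNo), existingPortNo)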
if __name__=='__main__':
print('Abstract class. Cannot run the module.')
pass
|
UsageRunner.py
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robotide.usages.commands import FindUsages, FindResourceUsages, FindVariableUsages
from robotide.usages.usagesdialog import UsagesDialog, UsagesDialogWithUserKwNavigation, ResourceImportUsageDialog
from threading import Thread
import wx
import time
class Usages(object):
def __init__(self, controller, highlight, name=None, kw_info=None):
self._name = name or controller.name
self._kw_info = kw_info
self._controller = controller
self._highlight = highlight
self._dlg = self._usages_dialog()
self._worker = Thread(target=self._run)
self._dialog_closed = False
def _usages_dialog(self):
if self._controller.name == self._name:
return UsagesDialogWithUserKwNavigation(self._name, self._highlight, self._controller)
return UsagesDialog(self._name)
def show(self):
self._dlg.add_selection_listener(self._highlight)
self._dlg.Bind(wx.EVT_CLOSE, self._stop)
self._dlg.Show()
self._worker.start()
def _run(self):
wx.CallAfter(self._begin_search)
for usage in self._find_usages():
time.sleep(0) # GIVE SPACE TO OTHER THREADS -- Thread.yield in Java
if self._dialog_closed: return
wx.CallAfter(self._add_usage, usage)
wx.CallAfter(self._end_search)
def _find_usages(self):
return self._controller.execute(FindUsages(self._name, self._kw_info))
def _begin_search(self):
if not self._dialog_closed:
self._dlg.begin_searching()
def _add_usage(self, usage):
if not self._dialog_closed:
self._dlg.add_usage(usage)
def _end_search(self):
if not self._dialog_closed:
self._dlg.end_searching()
def _stop(self, event):
self._dialog_closed = True
event.Skip()
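# A rough usage sketch: `controller` and `highlight` come from RIDE's plugin
# machinery and are assumed here, not shown in this file. Construct the helper
# and call show(); the search then runs on a worker thread while results are
# pushed to the dialog via wx.CallAfter:
#
#     usages = Usages(keyword_controller, highlight_callback)
#     usages.show()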
class ResourceFileUsages(Usages):
def __init__(self, controller, highlight):
Usages.__init__(self, controller, highlight)
def _usages_dialog(self):
return ResourceImportUsageDialog(self._controller.display_name,
self._highlight,
self._controller)
def _find_usages(self):
return self._controller.execute(FindResourceUsages())
class VariableUsages(Usages):
def _find_usages(self):
return self._controller.execute(FindVariableUsages(self._name))
|
test_retry_logic.py
|
#!/usr/bin/env python
#
# (c) Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import socket
import sys
import unittest
from contextlib import contextmanager
from copy import deepcopy
from threading import Thread
import requests
from opsramp.base import ApiObject, ApiWrapper
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import StringIO
except ImportError:
# Python 2.x support
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer # noqa
from StringIO import StringIO # noqa
# Define a list of "canned" responses here. These are used to test the retry
# capability of the API client, so that when faced with a response containing an
# HTTP 429 (Too Many Requests) status code, it should keep retrying until some
# maximum limit is reached.
CANNED_RESPONSES = [
{
'code': 429,
'message': b"Failed attempt #1"
},
{
'code': 429,
'message': b"Failed attempt #2"
},
{
'code': 429,
'message': b"Failed attempt #3"
},
{
'code': 200,
'message': b"Succeeded on fourth attempt"
},
]
# Borrowed from https://gist.github.com/mogproject/fc7c4e94ba505e95fa03
@contextmanager
def captured_output():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class MockServerRequestHandler(BaseHTTPRequestHandler, object):
def __init__(self, request, client_address, server):
# Creating this reference to the server as that is where the current
# server instance's copies of the canned responses live; a new request
# handler is generated for each request so they can't live here.
self.http_server = server
super(MockServerRequestHandler, self).__init__(
request,
client_address,
server
)
def do_GET(self):
logging.debug("running do_GET()")
if len(self.http_server.canned_responses) > 0:
# We have at least one more "canned" response to send...
response = self.http_server.canned_responses.pop(0)
logging.debug("Sending response: %s" % response)
# Send the header including the status code
self.send_response(code=response['code'])
self.send_header("Content-type", "text/plain")
self.end_headers()
# Write the message body to self.wfile
self.wfile.write(response['message'])
# self.canned_responses = self.canned_responses[1:]
logging.debug(
"%d responses remain" %
len(self.http_server.canned_responses)
)
else:
# If we run out of canned responses just send back a HTTP 500
# (Internal Server Error). The idea is that the retry logic should
# give up before the test server runs out of canned responses and
# that this is here just for completeness.
self.send_response(code=500)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(b"Internal Server Error")
return
class MockHttpServer(HTTPServer, object):
def __init__(
self,
server_address,
RequestHandlerClass,
canned_responses
):
# Need to subclass HTTPServer so that I can define a set of canned
# responses that will be consumed by MockServerRequestHandler.
logging.warning("Standing up HTTP Server")
self.canned_responses = canned_responses
super(MockHttpServer, self).__init__(
server_address,
RequestHandlerClass
)
class MockWebClient:
"""Minimal web client to generate requests against our mock web server
"""
def __init__(self, host='localhost', port=80):
self.host = host
self.port = port
def get(self):
return requests.get("http://%s:%d/test" % (self.host, self.port))
class TestMockServer(unittest.TestCase):
def setUp(self):
self.server_port = self.get_free_port()
logging.info("Creating HTTP server on port %d" % self.server_port)
self.mock_server = MockHttpServer(
('localhost', self.server_port),
MockServerRequestHandler,
deepcopy(CANNED_RESPONSES)
)
# Create the server in a separate thread so that it can run in parallel
# with the client (i.e. tests) being run against it.
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
self.mock_server_thread.daemon = True
self.mock_server_thread.start()
def tearDown(self):
logging.info("Shutting down Mock HTTP server")
self.mock_server.shutdown()
return super(TestMockServer, self).tearDown()
@classmethod
def get_free_port(cls):
"""Get the system to open a TCP socket (and let it choose the port
number). Once it's been created successfully, shut down the empty
server attached to that socket and return the port number to be
"recycled" for use with the test server.
Returns:
int -- Available TCP Port number
"""
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
_, port = s.getsockname()
s.close()
return port
def test__mock_server_works(self):
# Test that the web server returns all canned responses, in order.
client = MockWebClient(host='localhost', port=self.server_port)
for expected in CANNED_RESPONSES:
response = client.get()
self.assertEqual(response.status_code, expected['code'])
self.assertEqual(response.content, expected['message'])
def test__apiwrapper_retry_logic(self):
fake_url = 'http://localhost:%d/' % self.server_port
fake_token = 'ffffffffffffffff'
fake_auth = {
'Authorization': 'Bearer %s' % fake_token,
'Accept': 'application/json'
}
api_object = ApiObject(fake_url, fake_auth.copy())
assert 'ApiObject' in str(api_object)
api_stub = 'whatevs'
api_wrapper = ApiWrapper(api_object, api_stub)
api_endpoint = "foo"
with captured_output() as (out, err):
api_wrapper.api.get("/%s" % api_endpoint)
expected_requests = []
for resp in CANNED_RESPONSES:
req = '"GET /%s/%s HTTP/1.1" %d' % (
api_stub, api_endpoint, resp['code']
)
expected_requests.append(req)
lines = err.getvalue().splitlines()
# Check each GET request to verify that it gets the correct status code
# (basically, 429 three times then a 200) to confirm that the retry
# logic is doing its thing
for index in range(len(lines)):
self.assertIn(expected_requests[index], lines[index])
|
StreamDeck.py
|
# Python Stream Deck Library
# Released under the MIT license
#
# dean [at] fourwalledcubicle [dot] com
# www.fourwalledcubicle.com
#
import threading
import time
from abc import ABC, abstractmethod
from ..Transport.Transport import TransportError
class StreamDeck(ABC):
"""
Represents a physically attached StreamDeck device.
"""
KEY_COUNT = None
KEY_COLS = None
KEY_ROWS = None
KEY_PIXEL_WIDTH = None
KEY_PIXEL_HEIGHT = None
KEY_IMAGE_FORMAT = None
KEY_FLIP = None
KEY_ROTATION = None
DECK_TYPE = None
def __init__(self, device):
self.device = device
self.last_key_states = [False] * self.KEY_COUNT
self.read_thread = None
self.run_read_thread = False
self.read_poll_hz = 20
self.key_callback = None
self.update_lock = threading.RLock()
def __del__(self):
"""
Delete handler for the StreamDeck, automatically closing the transport
if it is currently open and terminating the transport reader thread.
"""
try:
self._setup_reader(None)
except (TransportError, ValueError):
pass
try:
self.device.close()
except (TransportError):
pass
def __enter__(self):
"""
Enter handler for the StreamDeck, taking the exclusive update lock on
the deck. This can be used in a `with` statement to ensure that only one
thread is currently updating the deck, even if it is doing multiple
operations (e.g. setting the image on multiple keys).
"""
self.update_lock.acquire()
return self
def __exit__(self, type, value, traceback):
"""
Exit handler for the StreamDeck, releasing the exclusive update lock on
the deck.
"""
self.update_lock.release()
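# A minimal usage sketch (the `deck` instance and image bytes are assumed):
# because __enter__ takes the update lock and __exit__ releases it, a `with`
# block makes a multi-key update atomic with respect to other threads:
#
#     with deck:
#         for key in range(deck.key_count()):
#             deck.set_key_image(key, image_bytes)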
@abstractmethod
def _read_key_states(self):
"""
Reads the raw key states from an attached StreamDeck.
:rtype: list(bool)
:return: List containing the raw key states.
"""
pass
@abstractmethod
def _reset_key_stream(self):
"""
Sends a blank key report to the StreamDeck, resetting the key image
streamer in the device. This prevents previously started partial key
writes that were not completed from corrupting images sent from this
application.
"""
pass
def _extract_string(self, data):
"""
Extracts out a human-readable string from a collection of raw bytes,
removing any trailing whitespace or data after the first NUL byte.
"""
return str(bytes(data), 'ascii', 'replace').partition('\0')[0].rstrip()
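# Illustrative example: _extract_string(b'Stream Deck\x00\xff\xff ') yields
# 'Stream Deck' -- undecodable bytes are replaced, everything from the first
# NUL byte onwards is dropped, and trailing whitespace is stripped.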
def _read(self):
"""
Read handler for the underlying transport, listening for button state
changes on the underlying device, caching the new states and firing off
any registered callbacks.
"""
while self.run_read_thread:
try:
new_key_states = self._read_key_states()
if new_key_states is None:
time.sleep(1.0 / self.read_poll_hz)
continue
if self.key_callback is not None:
for k, (old, new) in enumerate(zip(self.last_key_states, new_key_states)):
if old != new:
self.key_callback(self, k, new)
self.last_key_states = new_key_states
except (TransportError):
self.run_read_thread = False
def _setup_reader(self, callback):
"""
Sets up the internal transport reader thread with the given callback,
for asynchronous processing of HID events from the device. If the thread
already exists, it is terminated and restarted with the new callback
function.
:param function callback: Callback to run on the reader thread.
"""
if self.read_thread is not None:
self.run_read_thread = False
try:
self.read_thread.join()
except RuntimeError:
pass
if callback is not None:
self.run_read_thread = True
self.read_thread = threading.Thread(target=callback)
self.read_thread.daemon = True
self.read_thread.start()
def open(self):
"""
Opens the device for input/output. This must be called prior to setting
or retrieving any device state.
.. seealso:: See :func:`~StreamDeck.close` for the corresponding close method.
"""
self.device.open()
self._reset_key_stream()
self._setup_reader(self._read)
def close(self):
"""
Closes the device for input/output.
.. seealso:: See :func:`~StreamDeck.open` for the corresponding open method.
"""
self.device.close()
def is_open(self):
"""
Indicates if the StreamDeck device is currently open and ready for use.
:rtype: bool
:return: `True` if the deck is open, `False` otherwise.
"""
return self.device.is_open()
def connected(self):
"""
Indicates if the physical StreamDeck device this instance is attached to
is still connected to the host.
:rtype: bool
:return: `True` if the deck is still connected, `False` otherwise.
"""
return self.device.connected()
def id(self):
"""
Retrieves the physical ID of the attached StreamDeck. This can be used
to differentiate one StreamDeck from another.
:rtype: str
:return: Identifier for the attached device.
"""
return self.device.path()
def key_count(self):
"""
Retrieves number of physical buttons on the attached StreamDeck device.
:rtype: int
:return: Number of physical buttons.
"""
return self.KEY_COUNT
def deck_type(self):
"""
Retrieves the model of Stream Deck.
:rtype: str
:return: String containing the model name of the StreamDeck device.
"""
return self.DECK_TYPE
def key_layout(self):
"""
Retrieves the physical button layout on the attached StreamDeck device.
:rtype: (int, int)
:return (rows, columns): Number of button rows and columns.
"""
return self.KEY_ROWS, self.KEY_COLS
def key_image_format(self):
"""
Retrieves the image format accepted by the attached StreamDeck device.
Images should be given in this format when setting an image on a button.
.. seealso:: See :func:`~StreamDeck.set_key_image` method to update the
image displayed on a StreamDeck button.
:rtype: dict()
:return: Dictionary describing the various image parameters
(size, image format, image mirroring and rotation).
"""
return {
'size': (self.KEY_PIXEL_WIDTH, self.KEY_PIXEL_HEIGHT),
'format': self.KEY_IMAGE_FORMAT,
'flip': self.KEY_FLIP,
'rotation': self.KEY_ROTATION,
}
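# For example, a hypothetical deck subclass with 72x72 BMP keys that are
# flipped on both axes would report something like:
#
#     {'size': (72, 72), 'format': 'BMP', 'flip': (True, True), 'rotation': 0}
#
# Callers can use this to scale and encode an image before set_key_image().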
def set_poll_frequency(self, hz):
"""
Sets the frequency of the button polling reader thread, determining how
often the StreamDeck will be polled for button changes.
A higher frequency will result in a higher CPU usage, but a lower
latency between a physical button press and an event from the library.
:param int hz: Reader thread frequency, in Hz (1-1000).
"""
self.read_poll_hz = min(max(hz, 1), 1000)
def set_key_callback(self, callback):
"""
Sets the callback function called each time a button on the StreamDeck
changes state (either pressed, or released).
.. note:: This callback will be fired from an internal reader thread.
Ensure that the given callback function is thread-safe.
.. note:: Only one callback can be registered at one time.
.. seealso:: See :func:`~StreamDeck.set_key_callback_async` method for
a version compatible with Python 3 `asyncio` asynchronous
functions.
:param function callback: Callback function to fire each time a button
state changes.
"""
self.key_callback = callback
def set_key_callback_async(self, async_callback, loop=None):
"""
Sets the asynchronous callback function called each time a button on the
StreamDeck changes state (either pressed, or released). The given
callback should be compatible with Python 3's `asyncio` routines.
.. note:: The asynchronous callback will be fired in a thread-safe
manner.
.. note:: This will override the callback (if any) set by
:func:`~StreamDeck.set_key_callback`.
:param function async_callback: Asynchronous callback function to fire
each time a button state changes.
:param asyncio.loop loop: Asyncio loop to dispatch the callback into
"""
import asyncio
loop = loop or asyncio.get_event_loop()
def callback(*args):
asyncio.run_coroutine_threadsafe(async_callback(*args), loop)
self.set_key_callback(callback)
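# A usage sketch (names are illustrative): registering an asyncio coroutine so
# that key events are dispatched onto the running event loop:
#
#     async def on_key(deck, key, state):
#         print(key, "pressed" if state else "released")
#
#     deck.set_key_callback_async(on_key, loop=asyncio.get_event_loop())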
def key_states(self):
"""
Retrieves the current states of the buttons on the StreamDeck.
:rtype: list(bool)
:return: List describing the current states of each of the buttons on
the device (`True` if the button is being pressed, `False`
otherwise).
"""
return self.last_key_states
@abstractmethod
def reset(self):
"""
Resets the StreamDeck, clearing all button images and showing the
standby image.
"""
pass
@abstractmethod
def set_brightness(self, percent):
"""
Sets the global screen brightness of the StreamDeck, across all the
physical buttons.
:param int/float percent: brightness percent, from [0-100] as an `int`,
or normalized to [0.0-1.0] as a `float`.
"""
pass
@abstractmethod
def get_serial_number(self):
"""
Gets the serial number of the attached StreamDeck.
:rtype: str
:return: String containing the serial number of the attached device.
"""
pass
@abstractmethod
def get_firmware_version(self):
"""
Gets the firmware version of the attached StreamDeck.
:rtype: str
:return: String containing the firmware version of the attached device.
"""
pass
@abstractmethod
def set_key_image(self, key, image):
"""
Sets the image of a button on the StreamDeck to the given image. The
image being set should be in the correct format for the device, as an
enumerable collection of bytes.
.. seealso:: See :func:`~StreamDeck.key_image_format` method for
information on the image format accepted by the device.
:param int key: Index of the button whose image is to be updated.
:param enumerable image: Raw data of the image to set on the button.
If `None`, the key will be cleared to a black
color.
"""
pass
|
test_functional.py
|
try:
from importlib import reload
except ImportError:
pass
import json
import os
import queue
import tempfile
import threading
import pytest
from . import serve
from wptserve import logger
class ServerProcSpy(serve.ServerProc):
instances = None
def start(self, *args, **kwargs):
result = super(ServerProcSpy, self).start(*args, **kwargs)
if ServerProcSpy.instances is not None:
ServerProcSpy.instances.put(self)
return result
serve.ServerProc = ServerProcSpy
@pytest.fixture()
def server_subprocesses():
ServerProcSpy.instances = queue.Queue()
yield ServerProcSpy.instances
ServerProcSpy.instances = None
@pytest.fixture()
def tempfile_name():
fd, name = tempfile.mkstemp()
yield name
os.close(fd)
os.remove(name)
def test_subprocess_exit(server_subprocesses, tempfile_name):
timeout = 30
def target():
# By default, the server initially creates a child process to validate
# local system configuration. That process is unrelated to the behavior
# under test, but at the time of this writing, the parent uses the same
# constructor that is also used to create the long-running processes
# which are relevant to this functionality. Disable the check so that
# the constructor is only used to create relevant processes.
with open(tempfile_name, 'w') as handle:
json.dump({"check_subdomains": False, "bind_address": False}, handle)
# The `logger` module from the wptserver package uses a singleton
# pattern which resists testing. In order to avoid conflicting with
# other tests which rely on that module, pre-existing state is
# discarded through an explicit "reload" operation.
reload(logger)
serve.run(config_path=tempfile_name)
thread = threading.Thread(target=target)
thread.start()
server_subprocesses.get(True, timeout)
subprocess = server_subprocesses.get(True, timeout)
subprocess.stop()
thread.join(timeout)
assert not thread.is_alive()
|
server_test.py
|
import socket
import unittest
import threading
import functools
import time
import asyncore
from glide.server import CommandServer, CommandBaseHandler
PORT = 32767
class Tester(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_proc_handler(self):
client = functools.partial(self.run_client, 'start proc1')
client_thread = threading.Thread(target=client)
client_thread.start()
self.run_server()
client_thread.join()
def run_client(self, command):
time.sleep(1) # TODO need a blocking queue
s = socket.create_connection(('localhost', PORT))
s.send(command.encode())
data = s.recv(1024)
s.close()
asyncore.close_all()
self.assertEqual(data, command.encode())
def run_server(self):
CommandServer(
'0.0.0.0',
PORT,
Tester.Controller(),
Tester.EchoCommandHandler,
)
try:
asyncore.loop()
except Exception:
pass
class Controller(object):
def command(self, command):
return command
class EchoCommandHandler(CommandBaseHandler):
def handle_data(self, data):
assert self.controller.command(data) == data
return data
|
bclient.py
|
import socket
import pickle
import threading
import sys
import hashlib
import os
import OpenSSL
from OpenSSL import SSL
from OpenSSL import crypto
from Crypto import Random
from Crypto.Cipher import AES
from diffiehellman.diffiehellman import DiffieHellman
from OpenCA import createCSR
import Encryptor
import time
BUFSIZE=32768
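# Control bytes used by the simple chat protocol below (derived from the
# handlers in Client.run and the send helpers):
#   0x11 - request a Diffie-Hellman public key from the peer
#   0x12 - DH public key follows; set up the AES key and reply with 0x14
#   0x13 - plaintext (unencrypted) chat message follows
#   0x14 - DH public key follows; finish the key exchange on the initiating side
#   0x15 - certificate signing request (CSR) follows
#   0x20 / 0x66 - signed certificate follows
#   0x33 - client certificate follows
#   0x65 - request the peer's certificate
#   0x98 - ask the client to (re)generate and resend a CSR
# Data without a leading control byte is assumed to be AES-encrypted chat.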
class Client:
sock= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mujAES=None
cli=None
cislo=0
def __init__(self,adress):
self.sock.connect((adress, 9876))
if not os.path.isfile('User.private.pem') or not os.path.isfile('client1.cert.pem'):
createCSR('User','heslo',{'CN':'USER_FQDN'})
self.poslatCertReq()
print("REQUESTING A CERTIFICATE")
else:
print("LOADING THE CERTIFICATE FROM THE LOCAL FOLDER")
self.cert=OpenSSL.crypto.load_certificate(crypto.FILETYPE_PEM,open('client1.cert.pem').read())
self.poslatCert()
self.cli=DiffieHellman()
self.cli.generate_public_key()
# TODO: move this into run()
self.vyzadatKlic()
iThread=threading.Thread(target=self.sendMessage)
iThread.daemon=True
iThread.start()
self.run()
def run(self):
while True:
data=self.sock.recv(BUFSIZE)
# place for handling the received data
if not data:
# connection closed, end of communication
break
elif data[0:1]==b"\x66":
with open('client1.cert.pem','wb') as cert:
cert.write(data[1:])
self.nastavitCert(data[1:])
#self.cert=OpenSSL.crypto.load_certificate(crypto.FILETYPE_PEM,data[1:])
elif data[0:1]==b"\x11":
# when control byte 0x11 arrives, send our key on request
self.poslatKlic()
elif data[0:1]==b"\x98":
createCSR('User','heslo',{'CN':'USER_FQDN'})
self.poslatCertReq()
elif data[0:1]==b"\x12":
# when control byte 0x12 arrives, set the key that follows this byte
self.nastavitKlic(data[1:])
elif data[0:1]==b"\x13":
# unencrypted communication
print(data.decode())
# force a DH exchange if an unencrypted message arrives
#self.sock.send(b'\x11')
elif data[0:1]==b"\x14":
# set the key in the case where it was requested by the other side
self.jinenastaveniklice(data[1:])
elif data[0:1]==b'\x20':
self.nastavitCert(data[1:])
else:
# default case - data arrives without a control byte -> assume it is AES-encrypted with the agreed key
data=self.mujAES.decrypt(data)
try:
print("client "+str(self.cislo)+":"+data.decode())
except:
continue
def vyzadatCert(self):
self.sock.send(b'\x65')
def nastavitCert(self,data):
#self.cert=OpenSSL.crypto.load_certificate_request(crypto.FILETYPE_PEM, data)
self.cert=OpenSSL.crypto.load_certificate(crypto.FILETYPE_PEM,data)
def poslatCert(self):
text=OpenSSL.crypto.dump_certificate(crypto.FILETYPE_PEM,self.cert)
self.sock.send(b'\x33'+text)
def poslatCertReq(self):
# sends the certificate signing request to be signed
with open('User.CSR.pem') as cert:
certificate = OpenSSL.crypto.load_certificate_request(crypto.FILETYPE_PEM, cert.read())
certext = OpenSSL.crypto.dump_certificate_request(crypto.FILETYPE_PEM, certificate)
print(certext)
self.sock.send(b'\x15'+certext)
def poslatKlic(self):
# sends the control byte followed by our public key
self.sock.send(b'\x12'+str(self.cli.public_key).encode())
def jinenastaveniklice(self,data):
# derives the shared secret from the peer's public key and sets up the AES cipher (used on the side that initiated the exchange)
self.cli.generate_shared_secret(int(data.decode()),echo_return_key=True)
superklic=str(self.cli.shared_secret)
xy=hashlib.sha256(superklic.encode()).hexdigest()[:32]
print("2222222222222222222222222222")
self.cislo=2
print(xy)
self.mujAES=Encryptor.Encryptor(xy)
def nastavitKlic(self,data):
# sets the key based on the received peer public key and replies with our own public key
self.cli.generate_shared_secret(int(data.decode()),echo_return_key=True)
superklic=str(self.cli.shared_secret)
xy=hashlib.sha256(superklic.encode()).hexdigest()[:32]
print("111111111111111111111")
self.cislo=1
print(xy)
self.mujAES=Encryptor.Encryptor(xy)
self.sock.send(b'\x14'+str(self.cli.public_key).encode())
def vyzadatKlic(self):
# we have no key yet but want one, so send the request control byte
self.sock.send(b'\x11')
def sendMessage(self):
# main chat loop
while True:
msg=str(input(""))
if self.mujAES is not None:
msg=self.mujAES.encrypt(msg.encode())
self.sock.send(msg)
else:
msg=msg.encode()
self.sock.send(b'\x13'+msg)
kek='127.0.0.1'
client=Client(kek)
|
test_dataloader.py
|
# Owner(s): ["module: dataloader"]
import math
import sys
import errno
import multiprocessing
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import (
ChainDataset,
ConcatDataset,
DataLoader,
DataLoader2,
Dataset,
IterableDataset,
IterDataPipe,
Subset,
TensorDataset,
communication,
_utils
)
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.map import SequenceWrapper
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE)
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
err_msg = ("psutil not found. Some critical data loader tests relying on it "
"(e.g., TestDataLoader.test_proper_exit) will not run.")
if IS_IN_CI:
raise ImportError(err_msg) from None
else:
warnings.warn(err_msg)
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill")
try:
import numpy as np
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
skipIfNoNumpy = unittest.skipIf(not HAS_NUMPY, "no NumPy")
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if TEST_CUDA:
dev_name = torch.cuda.get_device_name(torch.cuda.current_device()).lower()
IS_JETSON = 'xavier' in dev_name or 'nano' in dev_name or 'jetson' in dev_name or 'tegra' in dev_name
else:
IS_JETSON = False
if not NO_MULTIPROCESSING_SPAWN:
# We want to use `spawn` if able because some of our tests check that the
# data loader terminates gracefully. To prevent hanging in the testing
# process, such data loaders are run in a separate subprocess.
#
# We also want to test the `pin_memory=True` configuration, thus `spawn` is
# required to launch such processes and they initialize the CUDA context.
#
# Mixing different start methods is a recipe for disaster (e.g., using a fork
# `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
# to avoid bugs.
#
# Get a multiprocessing context because some test / third party library will
# set start_method when imported, and setting again triggers `RuntimeError`.
mp = mp.get_context(method='spawn')
# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for an inter-process communication can vary widely. With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608). We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0 # seconds
supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDatasetRandomSplit(TestCase):
def test_lengths_must_equal_dataset_size(self):
with self.assertRaises(ValueError):
random_split([1, 2, 3, 4], [1, 2])
def test_splits_have_correct_size(self):
splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
self.assertEqual(len(splits), 2)
self.assertEqual(len(splits[0]), 2)
self.assertEqual(len(splits[1]), 4)
def test_splits_are_mutually_exclusive(self):
data = [5, 2, 3, 4, 1, 6]
splits = random_split(data, [2, 4])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
def test_splits_indexing_type(self):
r"""Indices generated by random_split
should be of integer type
"""
class CustomDataset():
def __init__(self, test_object, custom_list):
self.data = custom_list
self.test_object = test_object
def __getitem__(self, key):
self.test_object.assertEqual(type(key), type(0))
return self.data[key]
def __len__(self):
return len(self.data)
x = [1, 2, 3, 4, 5]
dataset = CustomDataset(self, x)
dataset = random_split(dataset, [5])[0]
data_loader = DataLoader(dataset)
for batch in data_loader:
pass
def test_splits_reproducibility(self):
self.assertEqual(
[list(x) for x in random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(1))],
[[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
)
self.assertEqual(
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
)
def test_splits_generator(self):
# A random_split without a specific generator should affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5])
b = torch.rand(10)
self.assertNotEqual(a, b)
# A random_split with a specific generator should not affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
b = torch.rand(10)
self.assertEqual(a, b)
def test_slicing_of_subset_of_dataset(self):
# Testing slicing a subset initialized with a dataset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_dataset[:], dataset[:])
self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset from random split
subset1, subset2 = random_split(dataset, [3, 2])
self.assertEqual(subset1[:], dataset[subset1.indices[:]])
self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])
def test_slicing_of_subset_of_subset(self):
# Testing slicing a subset initialized with a subset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_subset[:], dataset[:])
self.assertEqual(subset_of_subset[0:2], dataset[0:2])
self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset of subset from random split
subset1, subset2 = random_split(dataset, [4, 1])
subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
idx = [subset1.indices[i] for i in subset_of_subset1.indices]
self.assertEqual(subset_of_subset1[:], dataset[idx[:]])
self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
def __init__(self, n):
super(CUDACountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return torch.as_tensor(i, device='cuda')
def __len__(self):
return self.n
class CountingDataset(Dataset):
def __init__(self, n):
super(CountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return i
def __len__(self):
return self.n
class CountingIterableDataset(IterableDataset):
def __init__(self, n):
super(CountingIterableDataset, self).__init__()
self.n = n
def __iter__(self):
return iter(range(self.n))
def __len__(self):
return self.n
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestTensorDataset(TestCase):
def test_len(self):
source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
self.assertEqual(len(source), 15)
def test_getitem(self):
t = torch.randn(15, 10, 2, 3, 4, 5)
l = torch.randn(15, 10)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_getitem_1d(self):
t = torch.randn(15)
l = torch.randn(15)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_single_tensor(self):
t = torch.randn(5, 10)
source = TensorDataset(t)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t[i], source[i][0])
def test_many_tensors(self):
t0 = torch.randn(5, 10, 2, 3, 4, 5)
t1 = torch.randn(5, 10)
t2 = torch.randn(5, 10, 2, 5)
t3 = torch.randn(5, 10, 3, 7)
source = TensorDataset(t0, t1, t2, t3)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t0[i], source[i][0])
self.assertEqual(t1[i], source[i][1])
self.assertEqual(t2[i], source[i][2])
self.assertEqual(t3[i], source[i][3])
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestConcatDataset(TestCase):
def test_concat_two_singletons(self):
result = ConcatDataset([[0], [1]])
self.assertEqual(2, len(result))
self.assertEqual(0, result[0])
self.assertEqual(1, result[1])
def test_concat_two_non_singletons(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_two_non_singletons_with_empty(self):
# Adding an empty dataset somewhere is correctly handled
result = ConcatDataset([[0, 1, 2, 3, 4],
[],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_raises_index_error(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
with self.assertRaises(IndexError):
# this one goes to 11
result[11]
def test_add_dataset(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
result = d1 + d2 + d3
self.assertEqual(21, len(result))
self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())
def test_iterable_dataset_err(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
it1 = CountingIterableDataset(5)
it2 = CountingIterableDataset(10)
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([d1, it2, it1])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it2])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it1, d1])
# takes in dummy var so this can also be used as a `worker_init_fn`
def set_faulthander_if_available(_=None):
faulthandler.enable(sys.__stderr__)
if not IS_WINDOWS:
# windows does not have faulthandler.register
# chain=False prevents the default behavior of killing the process
faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
set_faulthander_if_available()
# Process `pid` must have called `set_faulthander_if_available`
def print_traces_of_all_threads(pid):
if not IS_WINDOWS:
# use the custom signal if available
os.kill(pid, signal.SIGUSR1)
else:
# otherwise we can still use the handler given by faulthandler.enable()
# at the cost of killing the process.
os.kill(pid, signal.SIGSEGV)
# wait in parent process to give subprocess some time to print
time.sleep(5)
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
# Why no *args?
# py2 doesn't support def fn(x, *args, key=val, **kwargs)
# Setting disable_stderr=True may generate a lot of unrelated error outputs
# but could be helpful for debugging.
def __init__(self, disable_stderr=True, **kwargs):
super(ErrorTrackingProcess, self).__init__(**kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
self.disable_stderr = disable_stderr
def run(self):
set_faulthander_if_available()
if self.disable_stderr:
# Disable polluting stderr with errors that are supposed to happen.
with open(os.devnull, 'w') as devnull:
os.dup2(devnull.fileno(), sys.stderr.fileno())
try:
super(ErrorTrackingProcess, self).run()
self._cconn.send(None)
except Exception:
self._cconn.send(ExceptionWrapper(sys.exc_info()))
raise
def print_traces_of_all_threads(self):
assert self.is_alive(), "can only use print_traces_of_all_threads if the process is alive"
assert not self.disable_stderr, "do not disable stderr if you use print_traces_of_all_threads"
# On platforms without `SIGUSR1`, `set_faulthander_if_available` sets
# `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
# the process. So let's poll the exception first
_ = self.exception
print_traces_of_all_threads(self.pid)
@property
def exception(self):
if self._pconn.poll():
self._exception = self._pconn.recv()
if self._exception is None:
return None
else:
return self._exception.exc_type(self._exception.exc_msg)
# ESRCH means that os.kill could not find a live process with the given pid
def send_signal(self, signum, ignore_ESRCH=False):
try:
os.kill(self.pid, signum)
except OSError as e:
if not ignore_ESRCH or e.errno != errno.ESRCH:
raise
class ErrorDataset(Dataset):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
class SegfaultDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return ctypes.string_at(0)
def __len__(self):
return self.size
class SleepDataset(Dataset):
def __init__(self, size, sleep_sec):
self.size = size
self.sleep_sec = sleep_sec
self.sleeped = False
def __getitem__(self, idx):
if not self.sleeped:
time.sleep(self.sleep_sec)
self.sleeped = True
return idx
def __len__(self):
return self.size
class SeedDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return torch.initial_seed()
def __len__(self):
return self.size
class WorkerSpecificIterableDataset(IterableDataset):
def __init__(self, sizes_for_all_workers):
self.sizes_for_all_workers = sizes_for_all_workers
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
assert worker_info is not None
return iter(range(self.sizes_for_all_workers[worker_info.id]))
def __len__(self):
return sum(self.sizes_for_all_workers)
# Inspired by https://stackoverflow.com/a/26703365
# If all workers call `sync_once`, they will be blocked until all workers
# reach the call (i.e., acting like a barrier).
# This can be used to ensure that each worker processes at least one sample.
class SynchronizedDataset(Dataset):
def __init__(self, size, batch_size, num_workers):
assert size >= num_workers * batch_size
self.count = mp.Value('i', 0, lock=True)
self.barrier = mp.Semaphore(0)
self.num_workers = num_workers
self.size = size
def sync_once(self):
with self.count.get_lock():
self.count.value += 1
if self.count.value == self.num_workers:
self.barrier.release()
self.barrier.acquire()
self.barrier.release()
def __getitem__(self, idx):
raise NotImplementedError
def __len__(self):
return self.size
class EmptyTensorDataset(torch.utils.data.Dataset):
def __init__(self, len):
self.len = len
def __len__(self):
return self.len
def __getitem__(self, any):
return torch.empty(0)
class SynchronizedSeedDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.initial_seed()
def _test_timeout(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_timeout_pin_memory(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1, pin_memory=True,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_large_sampler_indices(persistent_workers):
# See
# test_large_sampler_indices
# https://github.com/pytorch/pytorch/issues/48666
dataloader = torch.utils.data.DataLoader(
EmptyTensorDataset(10000000),
batch_size=40960,
persistent_workers=persistent_workers,
num_workers=1)
it = iter(dataloader)
for x in it:
assert x.numel() == 0
raise RuntimeError('My Error')
def disable_stderr(worker_id):
r"""
Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
from workers. Since the worker signal handler prints with low-level write(),
this has to be done at the OS level via dup2.
This is used as worker_init_fn for test_segfault.
"""
sys.stderr.flush() # flush library buffers that dup2 knows nothing about
# Can't use a with-block because otherwise the fd will be closed when this
# function ends.
with open(os.devnull, 'w') as devnull:
os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
dataset = SegfaultDataset(10)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, worker_init_fn=disable_stderr)
_ = next(iter(dataloader))
def _test_no_segfault():
dataset = [1, 2, 3]
num_threads = torch.get_num_threads()
if num_threads < 4:
torch.set_num_threads(4)
else:
torch.set_num_threads(num_threads)
mp_ctx = torch.multiprocessing.get_context(method='fork')
dataloader = DataLoader(dataset, num_workers=1, worker_init_fn=disable_stderr,
multiprocessing_context=mp_ctx)
_ = next(iter(dataloader))
class TestProperExitDataset(Dataset):
def __init__(self, size, error_event):
self.size = size
self.error_event = error_event
def __len__(self):
return self.size
def __getitem__(self, idx):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
def __init__(self, size, error_event):
self.error_event = error_event
self.size = size
self.remaining = size
def __len__(self):
return self.size
def __iter__(self):
return self
def __next__(self):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
self.remaining -= 1
if self.remaining < 0:
raise StopIteration
return torch.tensor(-1000)
next = __next__ # py2 compatibility
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(is_iterable_dataset, use_workers, pin_memory, exit_method,
hold_iter_reference, loader_setup_event, tester_setup_event,
persistent_workers):
num_workers = 2 if use_workers else 0
if exit_method == 'worker_error' or exit_method == 'worker_kill':
assert use_workers is True
if exit_method == 'worker_error':
worker_error_event = mp.Event()
else:
worker_error_event = None
if is_iterable_dataset:
ds = TestProperExitIterableDataset(7, worker_error_event)
else:
ds = TestProperExitDataset(12, worker_error_event)
loader = DataLoader(ds, batch_size=1, shuffle=False,
num_workers=num_workers, pin_memory=pin_memory,
worker_init_fn=set_faulthander_if_available,
persistent_workers=persistent_workers)
error_it = 2
if use_workers:
# 2 is the magical per-worker prefetch number...
# FIXME: change this after the number becomes configurable.
if is_iterable_dataset:
assert len(ds) * num_workers > (error_it + 2 + 1)
else:
assert len(loader) > (error_it + 2 + 1) * num_workers
else:
if is_iterable_dataset:
assert len(ds) > error_it + 1
else:
assert len(loader) > error_it + 1
it = iter(loader)
if use_workers:
workers = it._workers
def kill_pid(pid):
psutil_p = psutil.Process(pid)
psutil_p.kill()
psutil_p.wait(JOIN_TIMEOUT)
assert not psutil_p.is_running()
for i, _ in enumerate(it):
if i == 0:
if not hold_iter_reference:
del it
del loader
loader_setup_event.set()
tester_setup_event.wait()
# ensure that the workers are still alive
if use_workers:
for w in workers:
assert w.is_alive()
if worker_error_event is not None:
worker_error_event.set()
if i == error_it:
if exit_method == 'loader_error':
raise RuntimeError('Loader error')
elif exit_method == 'loader_kill':
kill_pid(os.getpid())
elif exit_method == 'worker_kill':
kill_pid(workers[-1].pid) # kill last worker
if not hold_iter_reference:
# Tries to trigger the __del__ clean-up rather than the automatic
# exiting of daemonic children. Technically it should be automatically
# triggered, but I don't want to rely on the implementation detail of
# Python gc.
gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def _test_worker_info_init_fn(worker_id):
worker_info = torch.utils.data.get_worker_info()
assert worker_id == worker_info.id, "worker_init_fn and worker_info should have consistent id"
assert worker_id < worker_info.num_workers, "worker_init_fn and worker_info should have valid id"
assert worker_info.seed == torch.initial_seed(), "worker_init_fn and worker_info should have consistent seed"
dataset = worker_info.dataset
assert isinstance(dataset, TestWorkerInfoDataset), "worker_info should have correct dataset copy"
assert not hasattr(dataset, 'value'), "worker_info should have correct dataset copy"
# test that WorkerInfo attributes are read-only
try:
worker_info.id = 3999
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
try:
worker_info.a = 3
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
for k in ['id', 'num_workers', 'seed', 'dataset']:
assert "{}=".format(k) in repr(worker_info)
dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
# get_worker_info returns None in main proc
assert torch.utils.data.get_worker_info() is None
num_workers = 2
batch_size = 2
dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
dataloader = DataLoader(dataset, batch_size=batch_size,
num_workers=num_workers,
worker_init_fn=_test_worker_info_init_fn)
it = iter(dataloader)
data = []
for d in it:
data.append(d)
worker_pids = [w.pid for w in it._workers]
data = torch.cat(data, 0)
for d in data:
# each `d` is a [worker_id, worker_pid] pair, which is set in
# _test_worker_info_init_fn
assert d[1] == worker_pids[d[0]]
# get_worker_info returns None in main proc after data loading
assert torch.utils.data.get_worker_info() is None
# main proc dataset was never assigned this attribute
assert not hasattr(dataset, 'value')
try:
_ = dataset[0]
except AttributeError:
return
raise RuntimeError('Expected AttributeError')
# test custom init function
def init_fn(worker_id):
torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
def __iter__(self):
raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
def __init__(self, length):
self.length = length
def __getitem__(self, indices):
assert isinstance(indices, (list, tuple))
return torch.as_tensor(indices)
def __len__(self):
return self.length
class BulkLoadingSampler(torch.utils.data.Sampler):
def __init__(self, dataset, batch_size):
self.dataset = dataset
self.batch_size = batch_size
def __iter__(self):
for x in torch.randperm(len(self.dataset)).split(self.batch_size):
yield x.tolist()
def __len__(self):
return int(math.ceil(len(self.dataset) / float(self.batch_size)))
class CustomList(list):
pass
class CustomDict(dict):
pass
def row_processor(row):
return np.add(row, 1)
def filter_len(row):
return len(row) == 4
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoader(TestCase):
def setUp(self):
super(TestDataLoader, self).setUp()
self.data = torch.randn(100, 2, 3, 5)
self.labels = torch.randperm(50).repeat(2)
self.dataset = TensorDataset(self.data, self.labels)
self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
persistent_workers = kwargs.get('persistent_workers', self.persistent_workers)
if persistent_workers and kwargs.get('num_workers', 0) == 0:
persistent_workers = False
kwargs['persistent_workers'] = persistent_workers
return DataLoader(dataset, **kwargs)
def _test_sequential(self, loader):
batch_size = loader.batch_size
if batch_size is None:
for idx, (sample, target) in enumerate(loader):
self.assertEqual(sample, self.data[idx])
self.assertEqual(target, self.labels[idx])
self.assertEqual(idx, len(self.dataset) - 1)
else:
for i, (sample, target) in enumerate(loader):
idx = i * batch_size
self.assertEqual(sample, self.data[idx:idx + batch_size])
self.assertEqual(target, self.labels[idx:idx + batch_size])
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_shuffle(self, loader):
found_data = {i: 0 for i in range(self.data.size(0))}
found_labels = {i: 0 for i in range(self.labels.size(0))}
batch_size = loader.batch_size
if batch_size is None:
for i, (batch_samples, batch_targets) in enumerate(loader):
sample, target = (batch_samples, batch_targets)
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1))
self.assertEqual(sum(found_labels.values()), (i + 1))
self.assertEqual(i, (len(self.dataset) - 1))
else:
for i, (batch_samples, batch_targets) in enumerate(loader):
for sample, target in zip(batch_samples, batch_targets):
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
it = iter(loader)
errors = 0
while True:
try:
next(it)
except NotImplementedError:
errors += 1
except StopIteration:
self.assertEqual(errors,
math.ceil(float(len(loader.dataset)) / loader.batch_size))
return
def test_error_in_init(self):
for num_workers in [0, 2]:
loader = self._get_data_loader(ErrorIterableDataset(), num_workers=num_workers)
with self.assertRaisesRegex(RuntimeError, 'Error in __iter__'):
list(iter(loader))
loader = self._get_data_loader(self.dataset, num_workers=2, worker_init_fn=error_worker_init_fn)
with self.assertRaisesRegex(RuntimeError, 'Error in worker_init_fn'):
list(iter(loader))
def test_typing(self):
from typing import List
# Make sure there is no TypeError
class SomeDatasetClass(Dataset[List[torch.Tensor]]):
pass
def _create_dataloader(is_train: bool) -> DataLoader[List[torch.Tensor]]:
pass
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super(RandomDataset).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_invalid_assign_after_init(self):
dl = self._get_data_loader(self.dataset)
for attr in ('batch_size', 'sampler', 'batch_sampler', 'drop_last', 'dataset'):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential_nonbatch(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
self._test_sequential(self._get_data_loader(self.dataset))
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
def test_bulk_loading_nobatch(self):
n = 35
bs = 4
ds = BulkLoadingDataset(n)
sampler = BulkLoadingSampler(ds, batch_size=4)
for num_workers in [0, 4]:
dl = self._get_data_loader(ds, num_workers=num_workers, batch_size=None, sampler=sampler, pin_memory=TEST_CUDA)
self.assertFalse(dl._auto_collation)
samples = list(dl)
self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = self._get_data_loader(dataset, shuffle=False)
dataloader_shuffle = self._get_data_loader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
def test_multiple_dataloaders(self):
for multiprocessing_context in supported_multiprocessing_contexts:
loader1_it = iter(self._get_data_loader(self.dataset, num_workers=1))
loader2_it = iter(self._get_data_loader(self.dataset, num_workers=2, multiprocessing_context=multiprocessing_context))
next(loader1_it)
next(loader1_it)
next(loader2_it)
next(loader2_it)
next(loader1_it)
next(loader2_it)
def test_segfault(self):
p = ErrorTrackingProcess(target=_test_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
if IS_WINDOWS:
self.assertIsInstance(p.exception, OSError)
self.assertRegex(str(p.exception), r'access violation reading ')
else:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
finally:
p.terminate()
# Tests if the child process forked by the DataLoader segfaults due to having more than 3 threads
# in the parent process after at least one set_num_threads invocation in the parent process.
# After forking, set_num_threads(1) in the child process entails handling some inherited data-structures
# of the Caffe2 thread-pool of the parent process, culminating in a segfault.
# Reference: https://github.com/pytorch/pytorch/issues/54752
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_no_segfault(self):
p = ErrorTrackingProcess(target=_test_no_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
if p.exception:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
self.fail("Segfault occurred in worker process after fork")
finally:
p.terminate()
def test_timeout(self):
if TEST_CUDA and not NO_MULTIPROCESSING_SPAWN:
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
# constructed.
targets = (_test_timeout, _test_timeout_pin_memory)
else:
targets = (_test_timeout,)
for target in targets:
p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
finally:
p.terminate()
def test_large_sampler_indices(self):
# Test that the data loader exits cleanly when the process errors while
# 1. holding a reference to the iterator
# 2. using a sampler that yields big elements s.t. _index_queues putters block
#
# More context: https://github.com/pytorch/pytorch/issues/48666
p = ErrorTrackingProcess(target=_test_large_sampler_indices, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'My Error')
finally:
p.terminate()
def test_invalid_ctor_args_combinations(self):
# general
with self.assertRaisesRegex(ValueError, "num_workers option should be non-negative"):
self._get_data_loader(self.dataset, num_workers=-1)
with self.assertRaisesRegex(ValueError, "timeout option should be non-negative"):
self._get_data_loader(self.dataset, timeout=-1)
# disable auto-batching
with self.assertRaisesRegex(ValueError,
"batch_size=None option disables auto-batching and is mutually exclusive"):
self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
# map-style
sampler = torch.utils.data.SequentialSampler(self.dataset)
batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_size=11, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, batch_size=11, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, shuffle=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=3, batch_sampler=batch_sampler)
# iterable-style
dataset = CountingIterableDataset(20)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=True)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=torch.utils.data.SequentialSampler(dataset))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=torch.utils.data.BatchSampler(
torch.utils.data.SequentialSampler(dataset), 3, False))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
for coll_ty in (list, tuple):
for num_workers in (0, 1):
# map-style dataset
dataset = CountingDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
# iterable-style dataset
dataset = CountingIterableDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
# this IterableDataset isn't configured per worker, so for
# the equality test below to be valid, we cannot have more than 1 worker.
assert num_workers in [0, 1], "invalid test"
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
def test_iterable_style_dataset(self):
# [no auto-batching] single process loading
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, batch_size=None)
fetched = list(dataloader)
self.assertEqual(len(fetched), 20)
for i, d in enumerate(fetched):
# non-batched should not convert ints into tensors
self.assertIsInstance(d, int)
self.assertEqual(d, i)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# [no auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = sorted(dataloader_iter)
for a, b in zip(fetched, expected):
# non-batched should not convert ints into tensors
self.assertIsInstance(a, int)
self.assertEqual(a, b)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# When loading more than len(dataset) data, after accessing len(dataloader),
# we should get a warning. See NOTE [ IterableDataset and __len__ ].
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, num_workers=num_workers,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
it = iter(dataloader)
for _ in range(40):
self.assertNotWarn(lambda: next(it), "Should not warn before accessing len(dataloader)")
self.assertEqual(len(dataloader), len(dataset))
self.assertEqual(len(dataloader), 20)
it = iter(dataloader)
for _ in range(20):
self.assertNotWarn(lambda: next(it), "Should not warn before exceeding length")
for _ in range(3):
with self.assertWarnsRegex(
UserWarning,
r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
msg="Should always warn after exceeding length"):
next(it)
# [no auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7))
self.assertEqual(len(fetched), 3)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
self.assertEqual(fetched[2].tolist(), list(range(14, 20)))
# [auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
# worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 4)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(4)), tuple(range(7)), tuple(range(7, 14)), tuple(range(14, 20))})
# [auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching & drop_last] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
self.assertEqual(len(fetched), 2)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
# [auto-batching & drop_last] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
# worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, drop_last=True,
worker_init_fn=set_faulthander_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 2)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})
# [auto-batching & drop_last] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
def test_chain_iterable_style_dataset(self):
# chaining (concatenation)
dataset1 = CountingIterableDataset(20)
dataset2 = CountingIterableDataset(15)
expected = list(range(20)) + list(range(15))
for num_workers in [0, 1]:
for chained_dataset in [dataset1 + dataset2, ChainDataset([dataset1, dataset2])]:
fetched = list(self._get_data_loader(chained_dataset, num_workers=num_workers))
self.assertEqual(len(fetched), len(expected))
for e, d in zip(expected, fetched):
self.assertIsInstance(d, torch.Tensor)
self.assertEqual(e, d)
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(dataset1 + self.dataset))
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(ChainDataset([dataset1, self.dataset])))
def test_multiprocessing_contexts(self):
reference = [
torch.arange(3),
torch.arange(3, 6),
torch.arange(6, 9),
torch.arange(9, 11),
]
counting_ds_n = 11
dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
# Windows and Jetson devices don't support sharing CUDA tensors; ROCm does not yet fully support IPC
if ctx in ['spawn', 'forkserver'] and TEST_CUDA and not IS_WINDOWS and not IS_JETSON:
ds_cls = CUDACountingDataset
else:
ds_cls = CountingDataset
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
@skipIfNoNumpy
def test_multiprocessing_iterdatapipe(self):
# Testing to make sure that a function from global scope (e.g. imported from a library) can be
# serialized and used with a multiprocess DataLoader
reference = [torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64),
torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64)]
datapipe: IterDataPipe = IterableWrapper([[1, 2, 3, 4], [1, 2, 3, 4, 5, 6]])
datapipe = datapipe.map(row_processor)
datapipe = datapipe.filter(lambda row: len(row) == 4) if HAS_DILL else datapipe.filter(filter_len)
dl_common_args = dict(num_workers=2, batch_size=2, shuffle=True, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
self.assertEqual(reference,
[t.type(torch.int64)
for t in self._get_data_loader(datapipe, multiprocessing_context=ctx, **dl_common_args)])
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(reference,
[t.type(torch.int64)
for t in
self._get_data_loader(datapipe, multiprocessing_context=ctx, **dl_common_args)])
def test_worker_seed(self):
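# Each DataLoader worker should receive a distinct base seed. SynchronizedSeedDataset
# (defined elsewhere in this file) is assumed to surface that seed per item, so we expect
# exactly num_workers unique values below.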
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers)
seeds = set()
for batch in dataloader:
seeds.add(batch[0])
self.assertEqual(len(seeds), num_workers)
def test_worker_seed_reproducibility(self):
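# With a fixed generator (manual_seed(42)), worker seeds are derived deterministically,
# so two independently constructed DataLoaders should fetch identical values.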
def get_dataloader():
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=torch.Generator().manual_seed(42))
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
self.assertEqual(set(int(batch) for batch in get_dataloader()), set(int(batch) for batch in get_dataloader()))
def test_worker_init_fn(self):
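# worker_init_fn runs once in every worker process before loading starts; init_fn
# (defined elsewhere in this file) is assumed to fix the seed so that every fetched
# value equals the 12345 asserted below.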
dataset = SeedDataset(4)
dataloader = self._get_data_loader(dataset, batch_size=2, num_workers=2,
worker_init_fn=init_fn)
for batch in dataloader:
self.assertEqual(12345, batch[0])
self.assertEqual(12345, batch[1])
def test_get_worker_info(self):
p = ErrorTrackingProcess(target=_test_get_worker_info)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
finally:
p.terminate()
def test_shuffle(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True))
def test_shuffle_reproducibility(self):
for fn in (
lambda: DataLoader(self.dataset, shuffle=True, num_workers=0, generator=torch.Generator().manual_seed(42)),
lambda: DataLoader(self.dataset, shuffle=True, num_workers=2, generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
def test_sequential_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
def test_sequential_batch_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2, num_workers=4))
def test_sequential_batch_workers_prefetch(self):
self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3))
def test_shuffle_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def test_shuffle_batch_workers_prefetch(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, prefetch_factor=3))
def test_random_sampler(self):
from collections import Counter
from torch.utils.data import RandomSampler
def sample_stat(sampler, num_samples):
counts = Counter(sampler)
count_repeated = sum(val > 1 for val in counts.values())
return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))
# test sample with replacement
n = len(self.dataset) + 1 # ensure at least one sample is drawn more than once
sampler_with_replacement = RandomSampler(self.dataset, replacement=True, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_with_replacement, n)
self.assertTrue(count_repeated > 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
# test sample without replacement and without specified num_samples
sampler_without_replacement = RandomSampler(self.dataset)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == len(self.dataset))
# test sample without replacement and with specified num_samples
n = len(self.dataset) * 2
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == len(self.dataset))
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
n = len(self.dataset) - 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
n = len(self.dataset) + 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 1)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
# raise error when replacement is non-boolean
with self.assertRaisesRegex(TypeError, "replacement should be a boolean value, but got replacement=0"):
RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=True,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(int(math.ceil(float(num_samples) / batch_size)),
count_num_samples_in_data_loader)
def test_random_sampler_len_without_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=False,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples // batch_size + (num_samples % batch_size > 0),
count_num_samples_in_data_loader)
def test_distributed_sampler_invalid_rank(self):
from torch.utils.data.distributed import DistributedSampler
dataset = torch.IntTensor(range(10))
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, 3)
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, -1)
def test_duplicating_data_with_drop_last(self):
from torch.utils.data.distributed import DistributedSampler
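# DistributedSampler pads each rank's shard to an equal length, which can duplicate a few
# samples; using the DataLoader with drop_last=True is expected to drop those padded entries,
# so the concatenation of every rank's batches should contain only unique values.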
num_processes = 4
num_batches = 9
data_set = torch.IntTensor(range(num_batches))
scanned_data = torch.IntTensor([])
for i in range(num_processes):
s = DistributedSampler(data_set, num_processes, i)
d_loader = self._get_data_loader(data_set, batch_size=int(num_batches / num_processes), drop_last=True, sampler=s)
for data in d_loader:
scanned_data = torch.cat((scanned_data, data), 0)
self.assertEqual(scanned_data.size(), scanned_data.unique().size())
def test_sampler_reproducibility(self):
from torch.utils.data import RandomSampler, WeightedRandomSampler, SubsetRandomSampler
weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
for fn in (
lambda: RandomSampler(self.dataset, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: RandomSampler(self.dataset, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: SubsetRandomSampler(range(10), generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
for sampler in (
RandomSampler(self.dataset, num_samples=5, replacement=True),
RandomSampler(self.dataset, replacement=False),
WeightedRandomSampler(weights, num_samples=5, replacement=True),
WeightedRandomSampler(weights, num_samples=5, replacement=False),
SubsetRandomSampler(range(10)),
):
torch.manual_seed(0)
l1 = list(sampler) + list(sampler)
torch.manual_seed(0)
l2 = list(sampler) + list(sampler)
self.assertEqual(l1, l2)
its = (iter(sampler), iter(sampler))
ls = ([], [])
for idx in range(len(sampler)):
for i in range(2):
if idx == 0:
torch.manual_seed(0)
ls[i].append(next(its[i]))
self.assertEqual(ls[0], ls[1])
def _test_sampler(self, **kwargs):
indices = range(2, 12) # using a regular iterable
dl = self._get_data_loader(self.dataset, sampler=indices, batch_size=2, **kwargs)
self.assertEqual(len(dl), 5)
for i, (input, _target) in enumerate(dl):
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[i * 2 + 2:i * 2 + 4])
def test_sampler(self):
self._test_sampler()
self._test_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_sampler(num_workers=4, multiprocessing_context='spawn')
def _test_batch_sampler(self, **kwargs):
# [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]
batches = [] # using a regular iterable
for i in range(0, 20, 5):
batches.append(tuple(range(i, i + 2)))
batches.append(tuple(range(i + 2, i + 5)))
dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
self.assertEqual(len(dl), 8)
for i, (input, _target) in enumerate(dl):
if i % 2 == 0:
offset = i * 5 // 2
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[offset:offset + 2])
else:
offset = i * 5 // 2
self.assertEqual(len(input), 3)
self.assertEqual(input, self.data[offset:offset + 3])
def test_batch_sampler(self):
self._test_batch_sampler()
self._test_batch_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
import numpy as np
class TestDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return np.ones((2, 3, 4)) * i
def __len__(self):
return 1000
loader = self._get_data_loader(TestDataset(), batch_size=12)
batch = next(iter(loader))
self.assertIsInstance(batch, torch.DoubleTensor)
self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_gen_state(self):
from torch.utils.data._utils.worker import _generate_state
# Using NumPy-generated states as the reference to test that `_generate_state`
# produces the same result.
# Test case: ((worker_id, base_seed), expected_state)
test_cases = [
((4, 13434589827475259383), (2884386318, 1088094898, 3523808998, 3860348662)),
((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
((10, 978296274032934101), (1759791917, 3550927336, 1225977135, 1036538043)),
((12, 11868770762134256968), (3974661794, 3331131333, 3630387033, 2885815368)),
((9, 15378787925219019706), (3815056996, 3162224466, 2735102421, 3190253477)),
((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
((15, 14617792358407278405), (3402479508, 1588702753, 1169536393, 3675067356)),
((9, 17363320784006640087), (957989458, 2518334477, 1421725660, 3086155459)),
((12, 480002904169484764), (2732851467, 1762620729, 4055801988, 1277640511)),
((15, 16803975943592702950), (3479415043, 4022359553, 295994005, 3358606349)),
((9, 11704776406047813044), (1968928009, 710113752, 2442656196, 1587420279)),
((10, 16357891985431864516), (1271733898, 4197047399, 3727213786, 2338547348)),
((2, 17423369006318065007), (544294336, 1911284083, 3299147734, 3231058347)),
((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
((6, 6269787272229682235), (2548857855, 1216457374, 1012973562, 2999759647))
]
for (worker_id, base_seed), exp in test_cases:
self.assertEqual(exp, _generate_state(base_seed, worker_id))
def test_error(self):
self._test_error(self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True))
def test_error_workers(self):
self._test_error(self._get_data_loader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
def test_partial_workers(self):
r"""Check that workers exit even if the iterator is not exhausted."""
if TEST_CUDA:
pin_memory_configs = (True, False)
else:
pin_memory_configs = (False,)
for pin_memory in pin_memory_configs:
loader = iter(self._get_data_loader(self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory))
workers = loader._workers
if pin_memory:
pin_memory_thread = loader._pin_memory_thread
for i, _ in enumerate(loader):
if i == 10:
break
assert i == 10
del loader
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive(), 'subprocess not terminated')
if pin_memory:
pin_memory_thread.join(JOIN_TIMEOUT)
self.assertFalse(pin_memory_thread.is_alive())
# Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
@skipIfRocm
@unittest.skipIf(not HAS_PSUTIL, "psutil not found")
@slowTest
def test_proper_exit(self):
# There might be ConnectionResetError or leaked semaphore warnings
# (due to dirty process exit), but they are all safe to ignore.
# TODO: test the case where the pin_memory_thread triggers an
# error/fatal signal. I haven't found out how to properly do that.
for is_iterable_dataset, use_workers, pin_memory, hold_iter_reference in \
itertools.product([True, False], repeat=4):
# `hold_iter_reference` specifies whether we hold a reference to the
# iterator. This is interesting because Python 3 error tracebacks hold a
# reference to the frames, which hold references to all the local
# variables including the iterator, so the iterator destructor may
# not be called before the process ends. It is important to see that the
# processes still exit in both cases.
if pin_memory and (not TEST_CUDA or NO_MULTIPROCESSING_SPAWN or IS_WINDOWS):
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
# For windows, pin_memory sometimes causes CUDA oom.
continue
# `exit_method` controls the way the loader process ends.
# - `*_kill` means that `*` is killed by OS.
# - `*_error` means that `*` raises an error.
# - `None` means that no error happens.
# In all cases, all processes should end properly.
if use_workers:
exit_methods = [None, 'loader_error', 'loader_kill', 'worker_error', 'worker_kill']
persistent_workers = self.persistent_workers
else:
exit_methods = [None, 'loader_error', 'loader_kill']
persistent_workers = False
for exit_method in exit_methods:
if exit_method == 'worker_kill':
# FIXME: This sometimes hangs. See #16608.
continue
desc = []
desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
desc.append('use_workers={}'.format(use_workers))
desc.append('pin_memory={}'.format(pin_memory))
desc.append('hold_iter_reference={}'.format(hold_iter_reference))
desc.append('exit_method={}'.format(exit_method))
desc = 'test_proper_exit with ' + ', '.join(desc)
# Event that the loader process uses to signal the testing process
# that various things are set up, including that the worker pids
# are specified in the `worker_pids` array.
loader_setup_event = mp.Event()
# Event that this process has finished setting up, and the
# loader process can now proceed to trigger error events or
# finish normally.
tester_setup_event = mp.Event()
loader_p = ErrorTrackingProcess(target=_test_proper_exit,
args=(is_iterable_dataset, use_workers, pin_memory,
exit_method, hold_iter_reference,
loader_setup_event, tester_setup_event,
persistent_workers),
disable_stderr=False)
loader_p.start()
loader_psutil_p = psutil.Process(loader_p.pid)
# Wait for loader process to set everything up, e.g., starting
# workers.
loader_setup_event.wait(timeout=JOIN_TIMEOUT)
if not loader_setup_event.is_set():
fail_msg = desc + ': loader process failed to setup within given time'
if loader_p.exception is not None:
fail_msg += ', and had exception {}'.format(loader_p.exception)
elif not loader_p.is_alive():
fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
else:
fail_msg += ', and is still alive.'
if loader_p.is_alive():
# this may kill the process, needs to run after the above lines
loader_p.print_traces_of_all_threads()
self.fail(fail_msg)
# We are certain that the workers have started now.
worker_psutil_ps = loader_psutil_p.children()
def fail(reason):
report_psutil_attrs = ['pid', 'name', 'cpu_times', 'io_counters',
'memory_full_info', 'num_ctx_switches',
'open_files', 'threads', 'status',
'nice', 'ionice']
if reason is None:
err_msg = desc
else:
err_msg = '{}: {}'.format(desc, reason)
err_msg += '\nLoader info:\n\t'
if loader_psutil_p.is_running():
err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
loader_p.print_traces_of_all_threads()
else:
err_msg += 'exited with code {}'.format(loader_p.exitcode)
if use_workers:
err_msg += '\nWorker(s) info:'
for idx, worker_psutil_p in enumerate(worker_psutil_ps):
err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
if worker_psutil_p.is_running():
err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
print_traces_of_all_threads(worker_psutil_p.pid)
else:
err_msg += 'exited with unknown code'
self.fail(err_msg)
tester_setup_event.set()
try:
loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
if loader_p.is_alive():
fail_reason = 'loader process did not terminate'
if loader_p.exception is not None:
fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
else:
fail(fail_reason + ', and had no exception')
_, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
if len(alive) > 0:
fail('worker process (pid(s) {}) did not terminate'.format(
', '.join(str(p.pid) for p in alive)))
if exit_method is None:
if loader_p.exitcode != 0:
fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
else:
if loader_p.exitcode == 0:
fail('loader process had zero exitcode')
if exit_method == 'loader_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Loader error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_kill':
if isinstance(loader_p.exception, RuntimeError):
if 'DataLoader worker (pid' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif isinstance(loader_p.exception, ConnectionRefusedError):
# Sometimes, when the worker is being killed and is freeing its
# resources, the unpickling in the loader process hits a
# `ConnectionRefusedError` because it cannot open a socket to
# receive the resource. In such cases, the worker may not have fully
# exited, and the loader can't know this via the `is_alive` check or the
# `SIGCHLD` handler. So we permit this as an allowed error as well.
# After all, we are happy as long as it terminates.
pass
else:
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Worker error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
finally:
loader_p.terminate()
def test_len(self):
def check_len(dl, expected):
self.assertEqual(len(dl), expected)
n = 0
for _ in dl:
n += 1
self.assertEqual(n, expected)
check_len(self.dataset, 100)
check_len(self._get_data_loader(self.dataset, batch_size=2), 50)
check_len(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
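# When an IterableDataset implements __len__, len(DataLoader) is derived from that length
# together with batch_size and drop_last; the number of batches actually yielded can still
# differ (see the length-warning checks in test_iterable_style_dataset above).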
class IterableDataset(torch.utils.data.IterableDataset):
def __len__(self):
return 10
def __iter__(self):
return iter(range(10))
iterable_loader = DataLoader(IterableDataset(), batch_size=1)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=1, drop_last=True)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=2)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=2, drop_last=True)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=3)
self.assertEqual(len(iterable_loader), 4)
iterable_loader = DataLoader(IterableDataset(), batch_size=3, drop_last=True)
self.assertEqual(len(iterable_loader), 3)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
import numpy as np
class ScalarDataset(torch.utils.data.Dataset):
def __init__(self, dtype):
self.dtype = dtype
def __getitem__(self, i):
return self.dtype()
def __len__(self):
return 4
dtypes = {
np.float64: torch.DoubleTensor,
np.float32: torch.FloatTensor,
np.float16: torch.HalfTensor,
np.int64: torch.LongTensor,
np.int32: torch.IntTensor,
np.int16: torch.ShortTensor,
np.int8: torch.CharTensor,
np.uint8: torch.ByteTensor,
}
for dt, tt in dtypes.items():
dset = ScalarDataset(dt)
loader = self._get_data_loader(dset, batch_size=2)
batch = next(iter(loader))
self.assertIsInstance(batch, tt)
def test_default_convert_mapping_keep_type(self):
data = CustomDict({"a": 1, "b": 2})
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_keep_type(self):
data = CustomList([1, 2, 3])
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_dont_keep_type(self):
data = range(2)
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, [0, 1])
def test_default_collate_dtype(self):
arr = [1, 2, -1]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.int64)
arr = [1.1, 2.3, -0.9]
collated = _utils.collate.default_collate(arr)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.float64)
arr = [True, False]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.bool)
# Should be a no-op
arr = ['a', 'b', 'c']
self.assertEqual(arr, _utils.collate.default_collate(arr))
def test_default_collate_mapping_keep_type(self):
batch = [CustomDict({"a": 1, "b": 2}), CustomDict({"a": 3, "b": 4})]
collated = _utils.collate.default_collate(batch)
expected = CustomDict({"a": torch.tensor([1, 3]), "b": torch.tensor([2, 4])})
self.assertEqual(collated, expected)
def test_default_collate_sequence_keep_type(self):
batch = [CustomList([1, 2, 3]), CustomList([4, 5, 6])]
collated = _utils.collate.default_collate(batch)
expected = CustomList([
torch.tensor([1, 4]),
torch.tensor([2, 5]),
torch.tensor([3, 6]),
])
self.assertEqual(collated, expected)
def test_default_collate_sequence_dont_keep_type(self):
batch = [range(2), range(2)]
collated = _utils.collate.default_collate(batch)
self.assertEqual(collated, [torch.tensor([0, 0]), torch.tensor([1, 1])])
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
import numpy as np
# Should be a no-op
arr = np.array(['a', 'b', 'c'])
self.assertEqual(arr, _utils.collate.default_collate(arr))
arr = np.array([[['a', 'b', 'c']]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([object(), object(), object()])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([[[object(), object(), object()]]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_numpy_memmap(self):
import numpy as np
with tempfile.TemporaryFile() as f:
arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
arr_memmap = np.memmap(f, dtype=arr.dtype, mode='w+', shape=arr.shape)
arr_memmap[:] = arr[:]
arr_new = np.memmap(f, dtype=arr.dtype, mode='r', shape=arr.shape)
tensor = _utils.collate.default_collate(list(arr_new))
self.assertTrue((tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item())
def test_default_collate_bad_sequence_type(self):
batch = [['X'], ['X', 'X']]
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch))
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch[::-1]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_shared_tensor(self):
import numpy as np
t_in = torch.zeros(1)
n_in = np.zeros(1)
self.assertEqual(t_in.is_shared(), False)
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), False)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), False)
# FIXME: fix the following hack that makes `default_collate` believe
# that it is in a worker process (since it tests
# `get_worker_info() != None`), even though it is not.
old = _utils.worker._worker_info
try:
_utils.worker._worker_info = 'x'
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), True)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), True)
finally:
_utils.worker._worker_info = old
def test_excessive_thread_creation_warning(self):
with self.assertWarnsRegex(
UserWarning,
r"excessive worker creation might get DataLoader running slow or even freeze"):
dataloader = DataLoader(self.dataset, batch_size=2, num_workers=1000)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2(TestCase):
@skipIfNoDill
def test_basics(self):
# TODO(VitalyFedyunin): This test will start breaking if we remove guaranteed order
# of traversing workers
dp = IterableWrapper(list(range(1000)))
dl = DataLoader(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
dl2 = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
dl2_threading = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2, parallelism_mode='thread')
self.assertEqual(list(dl), list(dl2))
self.assertEqual(list(dl), list(dl2_threading))
def test_shuffle(self):
items = list(range(1000))
dp = IterableWrapper(items).sharding_filter().shuffle()
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=False)
self.assertEqual(items, list(dl))
dl = DataLoader(dp, batch_size=None, num_workers=2, shuffle=False,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
self.assertEqual(items, list(dl))
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=True)
self.assertNotEqual(items, list(dl))
self.assertEqual(items, sorted(list(dl)))
dl = DataLoader(dp, batch_size=None, num_workers=2, shuffle=True,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
self.assertNotEqual(items, list(dl))
self.assertEqual(items, sorted(list(dl)))
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2_EventLoop(TestCase):
@skipIfNoDill
def test_basic_threading(self):
def clean_me(process, req_queue, res_queue):
req_queue.put(communication.messages.TerminateRequest())
_ = res_queue.get()
process.join()
it = list(range(100))
numbers_dp = IterableWrapper(it)
(process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(numbers_dp)
process.start()
local_datapipe = communication.iter.QueueWrapper(
communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))
actual = list(local_datapipe)
clean_me(process, req_queue, res_queue)
self.assertEqual(list(range(100)), actual)
@skipIfNoDill
def test_basic_mapdatapipe_threading(self):
def clean_me(process, req_queue, res_queue):
req_queue.put(communication.messages.TerminateRequest())
_ = res_queue.get()
process.join()
input_len = 100
it = list(range(input_len))
numbers_dp = SequenceWrapper(it)
(process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(
numbers_dp)
process.start()
# Functional Test: Ensure that you can retrieve every element from the Queue and DataPipe
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
actual = list(local_datapipe)
self.assertEqual([(x, x) for x in range(100)], actual)
# Functional Test: raise an IndexError when the input index is out of bounds
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
with self.assertRaisesRegex(IndexError, "out of bound"):
local_datapipe[1000]
# __len__ Test: Ensure that the correct length is returned
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
self.assertEqual(input_len, len(local_datapipe))
clean_me(process, req_queue, res_queue)
class StringDataset(Dataset):
def __init__(self):
self.s = '12345'
def __len__(self):
return len(self.s)
def __getitem__(self, ndx):
return (self.s[ndx], ndx)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestStringDataLoader(TestCase):
def setUp(self):
super(TestStringDataLoader, self).setUp()
self.dataset = StringDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for (s, n) in loader:
self.assertIsInstance(s[0], str)
self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
def __len__(self):
return 4
def __getitem__(self, ndx):
return {
'a_tensor': torch.empty(4, 2).fill_(ndx),
'another_dict': {
'a_number': ndx,
},
}
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDictDataLoader(TestCase):
def setUp(self):
super(TestDictDataLoader, self).setUp()
self.dataset = DictDataset()
def test_sequential_batch(self):
for persistent_workers in (False, True):
if persistent_workers:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers, num_workers=1)
else:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers)
batch_size = loader.batch_size
for i, sample in enumerate(loader):
idx = i * batch_size
self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
t = sample['a_tensor']
self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
self.assertTrue((t[0] == idx).all())
self.assertTrue((t[1] == idx + 1).all())
n = sample['another_dict']['a_number']
self.assertEqual(n.size(), torch.Size([batch_size]))
self.assertEqual(n[0], idx)
self.assertEqual(n[1], idx + 1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertTrue(sample['a_tensor'].is_pinned())
self.assertTrue(sample['another_dict']['a_number'].is_pinned())
class DummyDataset(torch.utils.data.Dataset):
def __init__(self):
self.data = list(range(10))
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# The persistent workers always maintain the original
# dataset through the dataloader lifetime,
# so the attributes will remain the same as when the
# workers were first spawned (first dataloader iteration).
assert self.start == 0
return self.data[idx]
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN, "DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoaderPersistentWorkers(TestDataLoader):
def setUp(self):
super(TestDataLoaderPersistentWorkers, self).setUp()
self.persistent_workers = True
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super().__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1, persistent_workers=True):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_dataset_not_reset(self):
dataset = DummyDataset()
pin_memory_configs = [False]
if TEST_CUDA:
pin_memory_configs.append(True)
for pin_memory in pin_memory_configs:
dataloader = self._get_data_loader(dataset, num_workers=2, pin_memory=pin_memory)
dataset.start = 0
for i in range(10):
for x in dataloader:
pass
# Changing the start value here has no effect on the dataset
# cached by the workers, since they are not recreated between epochs
# and can therefore cache values safely.
dataset.start = i
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_early_exit(self):
import subprocess
proc = subprocess.check_output([sys.executable, '-c', """\
import torch
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super().__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
if __name__ == '__main__':
dl = DataLoader(
RandomDataset(64, (28, 28)),
batch_size=16,
num_workers=2,
pin_memory=True,
persistent_workers=True,
multiprocessing_context="fork",
)
for _ in dl:
break
"""])
class NamedTupleDataset(Dataset):
from collections import namedtuple
Batch = namedtuple('Batch', ['data', 'label', 'random_tensor'])
Data = namedtuple('Data', ['positive', 'negative'])
def __len__(self):
return 4
def __getitem__(self, ndx):
return self.Batch(data=self.Data(positive=ndx, negative=-ndx),
label=str(ndx), random_tensor=torch.randn(3))
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestNamedTupleDataLoader(TestCase):
def setUp(self):
super(TestNamedTupleDataLoader, self).setUp()
self.dataset = NamedTupleDataset()
def test_dataloader_with_namedtuple(self):
# auto-collation
loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertIsInstance(batch.data.positive, torch.Tensor)
self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
# no auto-collation
loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertNotIsInstance(batch.data.positive, torch.Tensor)
class SimpleCustomBatch(object):
def __init__(self, data):
transposed_data = list(zip(*data))
self.inp = torch.stack(transposed_data[0], 0)
self.tgt = torch.stack(transposed_data[1], 0)
def pin_memory(self):
self.inp = self.inp.pin_memory()
self.tgt = self.tgt.pin_memory()
return self
def is_pinned(self):
return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` can not be correctly unpickled from spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
data = torch.stack([sample[0] for sample in batch], 1)
t, b = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
data = torch.stack([sample[0] for sample in batch], 0)
b, t = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestCustomPinFn(TestCase):
def setUp(self):
super(TestCustomPinFn, self).setUp()
inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
self.dataset = TensorDataset(inps, tgts)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin_worker(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True, num_workers=1)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
class TestWorkerQueueDataset(Dataset):
def __init__(self, data):
self.data = data
self.worker_id = None
def worker_init_fn(self, worker_id):
self.worker_id = worker_id
def __getitem__(self, item):
return self.worker_id, self.data[item]
def __len__(self):
return len(self.data)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"Flaky with ASAN, see https://github.com/pytorch/pytorch/issues/65727")
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
super(TestIndividualWorkerQueue, self).setUp()
self.dataset = TestWorkerQueueDataset(list(range(128)))
def _run_ind_worker_queue_test(self, batch_size, num_workers):
loader = DataLoader(
self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
timeout=5, worker_init_fn=self.dataset.worker_init_fn
)
current_worker_idx = 0
for i, (worker_ids, sample) in enumerate(loader):
self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
current_worker_idx += 1
if current_worker_idx == num_workers:
current_worker_idx = 0
def test_ind_worker_queue(self):
max_num_workers = None
if hasattr(os, 'sched_getaffinity'):
try:
max_num_workers = len(os.sched_getaffinity(0))
except Exception:
pass
if max_num_workers is None:
cpu_count = os.cpu_count()
if cpu_count is not None:
# Use half the number of CPUs
max_num_workers = cpu_count // 2
if max_num_workers is None:
max_num_workers = 1
for batch_size in (8, 16, 32, 64):
for num_workers in range(0, min(6, max_num_workers)):
self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers + 1)
class SetAffinityDataset(IterableDataset):
def __iter__(self):
torch.randperm(1)
after = os.sched_getaffinity(0)
return iter(after)
def worker_set_affinity(_):
os.sched_setaffinity(0, [multiprocessing.cpu_count() - 1])
@unittest.skipIf(
not hasattr(os, 'sched_setaffinity'),
"os.sched_setaffinity is not available")
class TestSetAffinity(TestCase):
def test_set_affinity_in_worker_init(self):
dataset = SetAffinityDataset()
dataloader = torch.utils.data.DataLoader(
dataset, num_workers=2, worker_init_fn=worker_set_affinity)
for sample in dataloader:
self.assertEqual(sample, [multiprocessing.cpu_count() - 1])
class ConvDataset(Dataset):
def __init__(self):
self.x = torch.ones(1, 1, 24000)
# Call convolution on parent process
self[0]
def __len__(self):
return 1
def __getitem__(self, index):
return torch.nn.functional.conv1d(self.x, torch.ones(1, 1, 2))
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
# Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
def test_conv_after_fork(self):
loader = DataLoader(ConvDataset(), num_workers=1)
for x in loader:
self.assertEqual(x.shape, (1, 1, 1, 23999))
if __name__ == '__main__':
run_tests()
|
Glue.py
|
#!/usr/bin/env python
# encoding: utf-8
import sublime
import sublime_plugin
from sys import version_info
import subprocess
import os
import threading
import shlex
import json
import traceback
if version_info[0] == 3:
import io
from .GlueIO import FileReader
else:
import StringIO
from GlueIO import FileReader
class GlueCommand(sublime_plugin.TextCommand):
def __init__(self, *args, **kwargs):
self.settings = sublime.load_settings('Glue.sublime-settings')
self.stdout = ""
self.stderr = ""
self.exitcode = 1
self.userpath = self.settings.get('glue_userpath')
self.shellpath = self.settings.get('glue_shellpath')
self.original_env_path = os.environ['PATH']
self.ps1 = self.settings.get('glue_ps1')
self.start_dirpath = ""
self.current_dirpath = self.settings.get('glue_working_directory')
self.current_filepath = ""
self.attr_lock = threading.Lock() # thread lock for attribute reads/writes
sublime_plugin.TextCommand.__init__(self, *args, **kwargs)
#------------------------------------------------------------------------------
# [ run method ] - plugin start method
#------------------------------------------------------------------------------
def run(self, edit):
try:
#------------------------------------------------------------------------------
# Establish Current Working Directory
# 1. check for current_dirpath attribute (empty string by default)
# 2. if not set, set it
# 3. if file does not exist, make it in current directory if detected, User dir if not
# 4. if directory exists after above, then chdir into it to establish as working directory
#------------------------------------------------------------------------------
st_buffer = 0 # flag that indicates use of buffer with unsaved terminal.glue file
create_file = 0 # flag that indicates a new file should be generated to run the terminal view
self.current_filepath = self.view.file_name() # file path if file exists and is saved, otherwise None
# check the settings to see if start directory is set
if len(self.start_dirpath) == 0:
# if the buffer has been saved and the filepath exists
if self.current_filepath:
# set start directory with the file user has open
self.start_dirpath = os.path.dirname(self.current_filepath)
else:
# set current directory with User directory
self.start_dirpath = os.path.expanduser('~')
st_buffer = 1 # indicate that user is attempting to use an unsaved buffer, do not create new .glue file
if len(self.current_dirpath) == 0:
self.current_dirpath = self.start_dirpath # if it hasn't been set yet, set it to the same directory as the start dir
sublime.status_message('Glue: Current directory: ' + self.current_dirpath) # notify user of CWD
# confirm that current directory exists and chdir into it
if os.path.isdir(self.current_dirpath):
os.chdir(self.current_dirpath) # make it the current working directory
else:
bad_dir_error_msg = "Glue Plugin Error: Unable to establish your working directory. Please confirm your settings if you changed the default directory. If this is not the problem, please report this as a new issue on the GitHub repository."
sublime.error_message(bad_dir_error_msg) # launch an error dialog
#------------------------------------------------------------------------------
# Establish current buffer / file
# 1. if using unsaved buffer (i.e. buffer = 1), set current path to <user-dir>/terminal.glue
#------------------------------------------------------------------------------
if st_buffer:
self.current_filepath = os.path.join(self.start_dirpath, 'terminal.glue')
else:
if self.current_filepath: # if it is set
if self.current_filepath.endswith('.glue'):
pass # a .glue file is being used, do nothing because this is desired behavior
else:
self.current_filepath = os.path.join(self.start_dirpath, 'terminal.glue')
create_file = 1 # switch the create file flag so that a new file is generated with this path
else: # otherwise the currentdir is set and need to establish the current filepath
self.current_filepath = os.path.join(self.start_dirpath, 'terminal.glue')
create_file = 0
#------------------------------------------------------------------------------
# Establish Active View as Appropriate File
#------------------------------------------------------------------------------
if self.current_filepath.endswith('.glue'):
if self.current_filepath == self.view.file_name():
pass # do nothing, the active view is the appropriate .glue terminal file
elif self.view.file_name() is None:
self.view.set_name('terminal.glue') #set the tab name on an unsaved buffer
elif self.current_filepath != self.view.file_name(): # another file in the directory is opened
# check for an existing .glue file and open if present
gluefile_test_list = [name for name in os.listdir(self.start_dirpath) if name.endswith('.glue')]
if len(gluefile_test_list) > 0: # if there is a .glue terminal file, open it
self.view = self.view.window().open_file(os.path.join(self.start_dirpath, gluefile_test_list[0]))
else:
self.view = self.view.window().new_file()
self.view.set_name('terminal.glue')
else:
if st_buffer:
self.view.set_name('terminal.glue')
elif create_file:
# confirm that there is not a .glue file in the current directory, open it if there is
gluefile_test_list = [name for name in os.listdir(self.start_dirpath) if name.endswith('.glue')]
if len(gluefile_test_list) > 0: # if there is a .glue terminal file, open it
self.view.window().open_file(os.path.join(self.start_dirpath, gluefile_test_list[0]))
else: # otherwise, create a new one
self.view = self.view.window().new_file() # create a new file at the file path established above
self.view.set_name('terminal.glue')
#------------------------------------------------------------------------------
# Launch the Input Panel for User Input - off to the races...
#------------------------------------------------------------------------------
self.view.window().show_input_panel(self.ps1 + ' ', '', self.muterun_runner, None, None)
except Exception:
self.exception_handler()
#------------------------------------------------------------------------------
# [ cleanup method ] - odds and ends before close of plugin when 'exit' called
#------------------------------------------------------------------------------
def cleanup(self):
self.current_dirpath = "" # clear the saved working directory path
self.start_dirpath = "" # clear the start directory path for the file
self.settings.set('glue_working_directory', '') # clear the saved directory path
if sublime.platform() == "osx":
os.environ['PATH'] = self.original_env_path # cleanup any environ PATH changes that Glue performed on Mac systems
#------------------------------------------------------------------------------
# [ exception_handler ] - print stack trace for raised exceptions from Glue plugin in the editor view
#------------------------------------------------------------------------------
def exception_handler(self, user_command=''):
glue_exc_message = "Glue encountered an error. Please report this as a new issue on the GitHub repository. Here is the stack trace:\n\n"
if version_info[0] == 2:
exc_string = StringIO.StringIO()
else:
exc_string = io.StringIO()
# push the stack trace stream to the StringIO
traceback.print_exc(file=exc_string)
# get the string value of the stack trace string and assign to variable
stack_trace = exc_string.getvalue()
# create the user message
user_exc_message = glue_exc_message + '\n\n' + stack_trace
# write
self.view.run_command('glue_writer', {'text': user_exc_message, 'command': user_command, 'exit': False})
# close the StringIO stream
exc_string.close()
#------------------------------------------------------------------------------
# [ muterun_runner ] - runner method for the main execution method
# here simply to wrap it in an exception handler
#------------------------------------------------------------------------------
def muterun_runner(self, user_command):
try:
self.muterun(user_command)
except Exception:
self.exception_handler(user_command)
#------------------------------------------------------------------------------
# [ muterun method ] - parse command + runner for execution of system command
#------------------------------------------------------------------------------
def muterun(self, user_command):
# create a parsed command line string
if version_info[0] == 3:
com_args = shlex.split(user_command) # use shlex for command line handling in ST3 / Py3
else:
com_args = user_command.split() # use simple split on whitespace in ST2, Py2.6 does not support unicode in shlex
# Handle missing command when user presses enter/return key
if not com_args:
no_command_msg = "Please enter a command"
self.view.run_command('glue_writer', {'text': no_command_msg, 'command': '', 'exit': False})
# EXIT command
elif com_args[0] == "exit":
self.cleanup() # run the cleanup method
self.view.run_command('glue_writer', {'text': '', 'command': '', 'exit': True})
# CD command
elif com_args[0] == "cd":
if len(com_args) > 1:
# include the ~ user home directory idiom
if com_args[1] == "~":
change_path = os.path.expanduser('~')
else:
change_path = com_args[1]
if os.path.exists(change_path) and os.path.isdir(change_path):
os.chdir(change_path)
directory_change_abspath = os.getcwd()
dir_change_text = directory_change_abspath + '\n'
directory_change_cmd = "cd " + change_path
self.current_dirpath = directory_change_abspath
self.settings.set('glue_working_directory', directory_change_abspath)
sublime.status_message('Glue: Current directory: ' + directory_change_abspath) # notify user of CWD
self.view.run_command('glue_writer', {'text': dir_change_text, 'command': directory_change_cmd, 'exit': False})
else:
directory_change_cmd = "cd " + change_path
dirchange_error_message = "Directory path '" + change_path + "' does not exist\n"
self.view.run_command('glue_writer', {'text': dirchange_error_message, 'command': directory_change_cmd, 'exit': False})
else:
dirchange_error_message = "Please enter a path following the 'cd' command\n"
self.view.run_command('glue_writer', {'text': dirchange_error_message, 'command': 'cd', 'exit': False})
# GLUE commands
elif com_args[0] == 'glue':
glue_command = ' '.join(com_args)
if len(com_args) > 1:
# HELP Command
if com_args[1] == "--help" or com_args[1] == "-h" or com_args[1] == "help":
help_text = get_help_text()
self.view.run_command('glue_writer', {'text': help_text, 'command': glue_command, 'exit': False})
# BROWSE command
elif com_args[1] == "browse":
if len(com_args) > 2:
import webbrowser
browse_string = com_args[2]
# if they requested a url with protocol, just open it
if browse_string.startswith('http://') or browse_string.startswith('https://'):
webbrowser.open(browse_string)
else:
# check if it is a local file that user wants to open in browser
# remove the initial OS dependent filepath separator character if added (will be added back in .join method below)
if browse_string.startswith(os.sep):
browse_string = browse_string[1:] # remove the first char (?are there typically two chars '\\' in Windows?)
elif os.altsep is not None:
if browse_string.startswith(os.altsep): # if there is an alternate separator (i.e. / on windows)
browse_string = browse_string[1:] # then remove it
check_path = os.path.join(os.path.abspath(self.current_dirpath), browse_string)
# test for existence of local file on the path
if self.is_file_here(check_path):
webbrowser.open('file://' + check_path) # if it is a local file, open it in browser
else:
webbrowser.open('http://' + browse_string) # if not, assume that it is a URL without protocol and add it
browser_msg = "glue browse [ " + browse_string + " ] complete\n"
self.view.run_command('glue_writer', {'text': browser_msg, 'command': glue_command, 'exit': False})
else:
browser_error_msg = "Please enter a URL or local filepath after the glue browse command\n"
self.view.run_command('glue_writer', {'text': browser_error_msg, 'command': glue_command, 'exit': False})
# CLEAR command
elif com_args[1] == "clear":
self.view.run_command('glue_clear_editor')
# keeps the input panel open for more commands
self.view.run_command('glue')
# FINDER command
elif com_args[1] == "finder":
# user is requesting a directory as an argument
if len(com_args) > 2:
finder_dirpath = com_args[2]
if os.path.isdir(finder_dirpath):
self.view.window().run_command("open_dir", {"dir": os.path.abspath(finder_dirpath)}) # open it
curdir_finder_msg = "The requested directory was opened in your finder\n"
elif os.path.isfile(finder_dirpath):
finder_dirpath = os.path.dirname(finder_dirpath)
self.view.window().run_command("open_dir", {"dir": os.path.abspath(finder_dirpath)}) # open it
curdir_finder_msg = "The requested directory was opened in your finder\n"
else:
curdir_finder_msg = "Unable to find the requested directory path. Please try again.\n"
# provide Glue view output to user after execution of the finder reveal
self.view.run_command('glue_writer', {'text': curdir_finder_msg, 'command': glue_command, 'exit': False})
# user is requesting the current working directory (i.e. no argument)
else:
if len(self.current_dirpath) > 0 and os.path.isdir(self.current_dirpath):
self.view.window().run_command("open_dir", {"dir": self.current_dirpath})
curdir_finder_msg = "The current directory was opened in your finder.\n"
self.view.run_command('glue_writer', {'text': curdir_finder_msg, 'command': glue_command, 'exit': False})
else:
curdir_finderror_msg = "Unable to detect the current working directory. Please restart the Glue plugin and try again.\n"
self.view.run_command('glue_writer', {'text': curdir_finderror_msg, 'command': glue_command, 'exit': False})
# GOTO command
elif com_args[1] == "goto":
if len(com_args) > 2:
goto_user_msg = "goto " + com_args[2] + " completed\n"
self.view.window().run_command("show_overlay", {"overlay": "goto", "show_files": True, "text": com_args[2]})
self.view.run_command('glue_writer', {'text': goto_user_msg, 'command': glue_command, 'exit': False})
else:
# if no query string, just open the overlay
goto_user_msg = "goto overlay launch completed\n"
self.view.window().run_command("show_overlay", {"overlay": "goto", "show_files": True})
self.view.run_command('glue_writer', {'text': goto_user_msg, 'command': glue_command, 'exit': False})
# LOCALHOST command
elif com_args[1] == "localhost":
import webbrowser
localhost_url = 'http://localhost:8000'
if len(com_args) > 2:
port = com_args[2] # the argument is the requested port number (no sanity check is performed)
localhost_url = 'http://localhost:' + port
webbrowser.open(localhost_url)
localhost_browse_msg = "glue localhost complete\n"
self.view.run_command('glue_writer', {'text': localhost_browse_msg, 'command': glue_command, 'exit': False})
# NEW command
elif com_args[1] == "new":
filenew_text = "glue new command completed\n"
self.view.run_command('glue_writer', {'text': filenew_text, 'command': glue_command, 'exit': False})
self.view.window().new_file()
# OPEN command
elif com_args[1] == "open":
if len(com_args) > 2:
fileopen_text = "glue open command completed\n"
self.view.run_command('glue_writer', {'text': fileopen_text, 'command': glue_command, 'exit': False})
self.view.window().run_command('glue_file_opener', {'current_dir': self.current_dirpath, 'file_list': com_args[2:]})
else:
missing_file_error_msg = "Please enter at least one filepath after the open command.\n"
self.view.run_command('glue_writer', {'text': missing_file_error_msg, 'command': glue_command, 'exit': False})
# PATH command
elif com_args[1] == "path":
if len(self.userpath) == 0:
# obtain the 'real' mac osx path using the get_mac_path method if not set by user
if sublime.platform() == "osx":
# get the PATH
updated_path = self.get_mac_path() # attempt to obtain the PATH set in the user's respective shell startup file
# set the Mac environ PATH to the obtained PATH
os.environ['PATH'] = updated_path
# assign the PATH to the self.userpath attribute for the executable search below (and for reuse while running)
self.userpath = updated_path
the_path = self.userpath
elif sublime.platform() == "windows":
the_path = os.environ['PATH']
# do not set the PATH in Windows, letting Win shell handle the command
elif sublime.platform() == "linux":
self.userpath = os.environ['PATH']
the_path = self.userpath
else:
# if there is a self.userpath that is set (user set in settings, previously set above) then set Python environ PATH string
the_path = self.userpath
self.view.run_command('glue_writer', {'text': the_path + '\n', 'command': glue_command, 'exit': False})
# TEMPLATE command
elif com_args[1] == "template":
if len(com_args) > 2:
template_name = ""
template_filename = ""
template_multi = False
# test for the flag and name option in the user command
for argument in com_args[2:]: # only test the arguments after the 'template' subcommand
if "--multi" in argument:
template_multi = True # user indicated that the file will specify multiple file paths
elif argument.startswith('--name='):
name_list = argument.split('=')
template_filename = name_list[1] # the user assigned file write name of the file
else:
template_name = argument # if it is not one of the above options, then it is the requested template name
print_string = template_name + " " + template_filename + " " + str(template_multi)
self.view.run_command('glue_writer', {'text': print_string, 'command': glue_command, 'exit': False})
else:
# user did not enter a template name
template_err_msg = "Please enter a template name after your command.\n"
self.view.run_command('glue_writer', {'text': template_err_msg, 'command': glue_command, 'exit': False})
# USER command
elif com_args[1] == "user":
uc_file_path = os.path.join(sublime.packages_path(), 'Glue-Commands', 'glue.json')
if self.is_file_here(uc_file_path):
fr = FileReader(uc_file_path)
user_json = fr.read_utf8()
usercom_dict = json.loads(user_json)
if len(usercom_dict) > 0:
if len(usercom_dict) == 1:
com_extension_string = 'extension'
com_number_string = 'lonely'
else:
com_extension_string = 'extensions'
com_number_string = str(len(usercom_dict))
number_com_msg = "Your " + com_number_string + " Glue " + com_extension_string + ":\n\n"
com_list = []
for key, value in self.xitems(usercom_dict):
com_string = key + " : " + value
com_list.append(com_string)
com_string = '\n'.join(sorted(com_list))
com_string = number_com_msg + com_string + '\n'
self.view.run_command('glue_writer', {'text': com_string, 'command': glue_command, 'exit': False})
else:
user_error_msg = "Your glue.json file does not contain any commands\n"
self.view.run_command('glue_writer', {'text': user_error_msg, 'command': glue_command, 'exit': False})
else:
usercom_error_msg = "The glue.json file could not be found. Please confirm that this is contained in a Glue-Commands directory in your Sublime Text Packages directory.\n"
self.view.run_command('glue_writer', {'text': usercom_error_msg, 'command': glue_command, 'exit': False})
# WCO command
elif com_args[1] == "wco":
if len(com_args) > 2:
fileopen_text = "glue wco command completed\n"
self.view.run_command('glue_writer', {'text': fileopen_text, 'command': glue_command, 'exit': False})
self.view.window().run_command('glue_file_wildcard_opener', {'current_dir': self.current_dirpath, 'match_pattern': com_args[2]})
else:
missing_file_error_msg = "Please enter a wildcard match pattern after the wco command.\n"
self.view.run_command('glue_writer', {'text': missing_file_error_msg, 'command': glue_command, 'exit': False})
# TEST command
elif com_args[1] == "test":
pass
# test open containing folder
#self.view.window().run_command("open_dir", {"dir": self.current_dirpath})
# self.view.run_command('glue_writer', {'text': current_proj, 'command': glue_command, 'exit': False})
# USER ALIAS commands
else:
if len(com_args) > 1:
uc_file_path = os.path.join(sublime.packages_path(), 'Glue-Commands', 'glue.json')
if self.is_file_here(uc_file_path):
fr = FileReader(uc_file_path)
user_json = fr.read_utf8()
usercom_dict = json.loads(user_json)
# if arguments from command, add those in location indicated by the file
if len(com_args) > 2:
# arguments were included on the command line, pass them to the user command
arguments = ' '.join(com_args[2:])
else:
# no additional arguments were included so pass empty string if there is an {{args}} tag
arguments = ''
if com_args[1] in usercom_dict:
user_command = usercom_dict[com_args[1]]
user_command = user_command.replace('{{args}}', arguments) # replace with CL args
user_command = user_command.replace('{{pwd}}', os.getcwd()) # replace with working dir path
user_command = user_command.replace('{{clipboard}}', sublime.get_clipboard()) # replace with contents of clipboard
self.muterun(user_command) # execute the command
else:
# didn't find a glue alias with the requested name in the existing glue alias settings file
bad_cmd_error_msg = "Glue could not identify that command. Please try again.\n"
self.view.run_command('glue_writer', {'text': bad_cmd_error_msg, 'command': glue_command, 'exit': False})
# Didn't find a glue alias setting file, provide error message
else:
bad_cmd_error_msg = "Glue could not identify that command. Please try again.\n"
self.view.run_command('glue_writer', {'text': bad_cmd_error_msg, 'command': glue_command, 'exit': False})
else:
missing_arg_error_msg = "Glue requires an argument. Please use 'glue help' for more information.\n"
self.view.run_command('glue_writer', {'text': missing_arg_error_msg, 'command': glue_command, 'exit': False})
# Execute the system command that was entered
else:
try:
if len(com_args) > 0:
arguments = ' '.join(com_args[1:])
else:
arguments = ''
command = os.path.join(self.get_path(com_args[0]), com_args[0]) + " " + arguments
t = threading.Thread(target=self.execute_command, args=(command, user_command))
t.start() # launch the thread to execute the command
self.progress_indicator(t) # provide progress indicator
self.print_on_complete(t, user_command) # polls for completion of the thread and prints to editor
except Exception as e:
raise e
#------------------------------------------------------------------------------
# [ is_file_here ] - returns boolean for presence of filepath
#------------------------------------------------------------------------------
def is_file_here(self, filepath):
if os.path.exists(filepath) and os.path.isfile(filepath):
return True
else:
return False
#------------------------------------------------------------------------------
# [ get_mac_path method ] - obtain the user PATH setting on the Mac from bash
#------------------------------------------------------------------------------
def get_mac_path(self):
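# run an interactive login shell (-i -l) so that the user's startup files are sourced and $PATH matches the terminal environment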
pathgetter = "bash -ilc 'echo -n $PATH'"
updated_path = subprocess.Popen(pathgetter, stdout=subprocess.PIPE, shell=True).stdout.read()
# update the shell PATH with this path
return updated_path.decode("utf-8").rstrip().rstrip(':')
#------------------------------------------------------------------------------
# [ get_path method ] - find the correct path to the executable from the user's PATH settings
#------------------------------------------------------------------------------
def get_path(self, executable):
# if it is not set, attempt to use the environment PATH variable that Python returns
if len(self.userpath) == 0:
# set the mac osx PATH with os.environ['PATH'] - fix for OSX PATH set issue in with Python subprocess module
if sublime.platform() == "osx":
# get the PATH
updated_path = self.get_mac_path() # obtain the PATH set in the user's respective shell rc file
# set the Mac environ PATH to the obtained PATH
os.environ['PATH'] = updated_path
# assign the PATH to the self.userpath attribute for the executable search below (and for reuse while running)
self.userpath = updated_path
elif sublime.platform() == "windows":
pass # do nothing, do not want to set path on Win, let Win shell handle it...
elif sublime.platform() == "linux":
self.userpath = os.environ['PATH']
else:
# fix for Mac OSX users PATH settings
if sublime.platform() == "osx":
os.environ['PATH'] = self.userpath
# need to keep the Windows ; PATH separator logic first because the : will match in Windows paths like C:\blah
if ';' in self.userpath:
paths = self.userpath.split(';')
for path in paths:
test_path = os.path.join(path, executable)
# Windows unicode test in Py2
if version_info[0] == 2:
if os.path.isfile(unicode(test_path)):
return path
# otherwise perform standard string comparisons
if os.path.isfile(test_path):
return path
elif os.path.islink(test_path):
return os.path.dirname(os.path.realpath(test_path))
# if the method did not return with found path, just return empty path and keep fingers crossed...
return ''
elif ':' in self.userpath:
paths = self.userpath.split(':')
for path in paths:
test_path = os.path.join(path, executable)
# Unicode test in Py2, determine whether unicode string matches for OS that encodes as unicode
if version_info[0] == 2:
if os.path.isfile(unicode(test_path)):
return path
# otherwise perform standard string comparisons (Py3 str incorporates unicode type from Py2)
if os.path.isfile(test_path):
return path
elif os.path.islink(test_path):
return os.path.dirname(os.path.realpath(test_path))
# if the method did not return with found path, just return empty path and keep fingers crossed...
return ''
else:
# there was one path in the setting, so return it as the proper path to executable
return self.userpath
#------------------------------------------------------------------------------
# [ print_on_complete method ] - print to editor from main thread when cmd execution complete
#------------------------------------------------------------------------------
def print_on_complete(self, thread, user_command):
if thread.is_alive():
sublime.set_timeout(lambda: self.print_on_complete(thread, user_command), 20)
return
else:
# command was successful
if self.exitcode == 0:
# clean the standard output string
clean_stdout = self.clean_output(self.stdout)
self.view.run_command('glue_writer', {'text': clean_stdout, 'command': user_command})
# command was not successful (non-zero exit status)
else:
self.view.run_command('glue_writer', {'text': self.stderr, 'command': user_command})
# print to stdout as well - removed
# self.print_response()
#------------------------------------------------------------------------------
# [ clean_output method ] - remove special characters that should not be printed to standard output view
#------------------------------------------------------------------------------
def clean_output(self, stdout_string):
# remove carriage return char (they display as CR in ST)
stdout_string = stdout_string.replace('\r\n', '\n') # include this above the '\r' statement so that user does not get '\n\n' replacements
stdout_string = stdout_string.replace('\r', '\n')
return stdout_string
#------------------------------------------------------------------------------
# [ progress_indicator method ] - display progress indicator for long running processes
#------------------------------------------------------------------------------
def progress_indicator(self, thread, i=0, direction=1):
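# animate a marker that bounces between the brackets in the status bar while the command thread is still running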
if thread.is_alive():
before = i % 8
after = (7) - before
if not after:
direction = -1
if not before:
direction = 1
i += direction
self.view.set_status('glue_status_indicator', 'Glue: Running command [%s|%s]' % (' ' * before, ' ' * after))
sublime.set_timeout(lambda: self.progress_indicator(thread, i, direction), 75)
return
else:
self.view.erase_status('glue_status_indicator')
sublime.status_message('Glue: Command completed.')
#------------------------------------------------------------------------------
# [ execute_command method ] - execute a system command
# run in a separate thread from muterun() method above
# assigns stdout stderr and exitcode in instance attributes
#------------------------------------------------------------------------------
def execute_command(self, command, user_command):
# Python 3 version = Sublime Text 3 version
if version_info[0] == 3:
try:
# execute the system command (with user assigned shell if glue_shellpath is set)
if len(self.shellpath) == 0:
response = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
elif os.path.exists(self.shellpath) and os.path.isfile(self.shellpath):
response = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True, executable=self.shellpath)
else:
# run the default shell type if cannot identify the shellpath that the user assigned
response = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
# acquire thread lock on attribute data
with self.attr_lock:
self.exitcode = 0
self.stdout = response.decode('utf-8')
except subprocess.CalledProcessError as cpe:
# acquire thread lock on the attribute data
with self.attr_lock:
self.stderr = cpe.output.decode('utf-8')
if cpe.returncode:
self.exitcode = cpe.returncode
else:
self.exitcode = 1
except Exception as e:
raise e
# Python 2 version = Sublime Text 2 version
else:
try:
if len(self.shellpath) == 0:
response = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
elif os.path.exists(self.shellpath) and os.path.isfile(self.shellpath):
response = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
executable=self.shellpath)
else:
# run the default shell if cannot identify the shellpath that the user assigned
response = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = response.communicate()
with self.attr_lock: # use the attribute lock (separate thread)
self.stdout = stdout.decode('utf-8')
self.stderr = stderr.decode('utf-8')
self.exitcode = response.returncode
except Exception as e:
raise e
#------------------------------------------------------------------------------
# [ print_response method ] - print a string to the stdout on ST console
#------------------------------------------------------------------------------
def print_response(self):
with self.attr_lock:
excode = self.exitcode
if excode == 0:
with self.attr_lock:
print(self.stdout)
else:
with self.attr_lock:
print(self.stderr)
#------------------------------------------------------------------------------
# [ xitems iterator ] - uses appropriate method from Py2 and Py3 to iterate through dict items
#------------------------------------------------------------------------------
def xitems(self, the_dict):
if version_info[0] == 3:
return the_dict.items()
else:
return the_dict.iteritems()
#------------------------------------------------------------------------------
# [ GlueWriterCommand class ] - writes to a ST view
#------------------------------------------------------------------------------
class GlueWriterCommand(sublime_plugin.TextCommand):
def __init__(self, *args, **kwargs):
self.settings = sublime.load_settings('Glue.sublime-settings')
self.ps1 = self.settings.get('glue_ps1')
self.show_path = self.settings.get('glue_display_path')
self.exit_message = self.settings.get('glue_exit_message')
sublime_plugin.TextCommand.__init__(self, *args, **kwargs)
def run(self, edit, text="", command="", exit=False):
path_string = "[ " + os.getcwd() + " ]"
if not exit:
if self.show_path:
command_line = self.ps1 + " " + path_string + " " + command + "\n"
else:
command_line = self.ps1 + " " + command + "\n"
self.view.insert(edit, self.view.sel()[0].begin(), command_line)
text = text + '\n'
self.view.insert(edit, self.view.sel()[0].begin(), text)
self.view.show(self.view.sel()[0].begin())
# keeps the input panel open for more commands
self.view.run_command('glue')
else:
# do not reopen the input panel with the run_command call above
if self.show_path:
exit_command = self.ps1 + " " + path_string + " exit\n"
else:
exit_command = self.ps1 + " exit\n"
exit_string = self.exit_message + "\n"
self.view.insert(edit, self.view.sel()[0].begin(), exit_command)
self.view.insert(edit, self.view.sel()[0].begin(), exit_string)
self.view.show(self.view.sel()[0].begin())
#------------------------------------------------------------------------------
# [ GlueClearEditorCommand class ] - clears the editor window
#------------------------------------------------------------------------------
class GlueClearEditorCommand(sublime_plugin.TextCommand):
def run(self, edit):
the_viewer = sublime.Region(0, self.view.size())
self.view.erase(edit, the_viewer)
#------------------------------------------------------------------------------
# [ get_help_text function ] - returns the user help string
#------------------------------------------------------------------------------
def get_help_text():
help_string = """
__
.-----| .--.--.-----.
| _ | | | | -__|
|___ |__|_____|_____|
|_____|
Copyright 2014 Christopher Simpkins | MIT License
Glue joins your shell to Sublime Text in quasi-perfect harmony.
USAGE
<command> [option(s)]
Enter a system command in the input panel at the bottom of your editor using the same syntax that you use in your terminal. The standard output stream from the executable is printed in the active view of your editor after it returns.
To quit Glue, submit the command 'exit'.
COMMANDS
Glue provides the following additional commands:
glue browse <url,path> Open default browser to <url> or local <path>
glue clear Clear the text in the Glue view
glue finder [path] Reveal current directory (default) or [path] directory in finder
glue goto <query> Sublime Text Goto Anything search for <query>
glue help Glue help
glue localhost [port] Open browser to localhost:8000 or optional localhost:[port]
glue new Create a new Sublime Text buffer
glue open <path> Open a file at <path> in the editor. Accepts multiple <path>
glue path View your PATH settings
glue user View your Glue extensions (if present)
glue wco <pattern> Open file(s) with wildcard <pattern> in the editor
USER COMMANDS
Create a `Glue-Commands` directory inside your Sublime Text `Packages` directory. Create a `glue.json` file inside the `Glue-Commands` directory. Then map your JSON key:value as "command-name": "system command string".
You have the option to include the following replacement tags in your system command string:
{{args}} additional arguments that you include on the command line
{{clipboard}} the contents of the clipboard
{{pwd}} the current working directory path
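For example, a glue.json file might look like this (the command names are only illustrative):
{
"serve": "python -m http.server {{args}}",
"echo-cb": "echo {{clipboard}}",
"where": "echo {{pwd}}"
}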
Launch Glue and run your command extension(s) with the following syntax:
glue <command-name> [args]
Your command is executed from your current working directory. Please see the documentation for additional details.
NAVIGATION
The working directory is initially set to the directory containing the buffer in which you are using Glue (when you open it from the sidebar right-click menu or have a project file open in the editor).
Change directories with the 'cd' command:
cd <directory path> Make `directory path` the working directory
cd .. Make parent directory the working directory
cd ~ Make user home directory the working directory
Note that your working directory defaults to the system User directory if you launch Glue from the Command Palette without having an open project file in the editor (or in a clean editor window without an open project).
ISSUES
Please submit bug reports on the GitHub repository @ https://github.com/chrissimpkins/glue/issues
HELP
Detailed help is available @ http://gluedocs.readthedocs.org/
"""
return help_string
|
postgresql_database_tests.py
|
from datetime import datetime
import unittest
import psycopg2
from time import sleep
from pyDBMS.database.postgres_database import PostgresDatabase
from tests.example_types import LogTimestamp, SimpleChildModel, SimpleModel, SpecialDate
class TestPostgresDB(unittest.TestCase):
DATABASE_NAME = 'test_database'
def setUp(self):
self.conn = psycopg2.connect(host='localhost',dbname='postgres', port='5432', user='postgres', password='password')
self.conn.autocommit = True
self._clear_database(self.conn)
self.conn = psycopg2.connect(host='localhost',dbname=self.DATABASE_NAME, port='5432', user='postgres', password='password')
self.conn.cursor().execute('''CREATE TABLE simple_model (model_id TEXT PRIMARY KEY,integer_column INTEGER, float_column FLOAT)''')
self.conn.commit()
self.db = PostgresDatabase(host='localhost',dbname=self.DATABASE_NAME, port='5432', user='postgres', password='password')
# @classmethod
# def setUpClass(cls):
# cls.database_image_process = Process(target=cls._boot_image,args=(cls,))
# cls.database_image_process.start()
# sleep(10)
# @classmethod
# def tearDownClass(cls):
# cls.database_image_process.kill()
# def _boot_image(cls):
# subprocess.run(['bash', '-c', 'docker pull postgres && docker run -d --name postgres -p 5432:4424 -e POSTGRES_USERNAME="postgres" -e POSTGRES_DATABASE="postgres" -e POSTGRES_PASSWORD="password" postgres'])
def _clear_database(self, connection):
cur = connection.cursor()
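# terminate any other sessions connected to the test database so that the DROP DATABASE below is not blocked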
cur.execute('''SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid() AND datname = 'test_database';''')
connection.commit()
cur.execute(f'drop database if exists {self.DATABASE_NAME}')
cur.execute(f'CREATE DATABASE {self.DATABASE_NAME}')
def test_get_tables(self):
tables = self.db.get_tables()
self.assertIn('simple_model', tables)
def test_get_columns(self):
columns = self.db.get_columns('simple_model')
self.assertListEqual(sorted(columns), sorted(['model_id','integer_column','float_column']))
def test_get_columns_for_nonexistent_table(self):
with self.assertRaises(KeyError):
self.db.get_columns('invalid_table')
def test_table_exists(self):
self.assertTrue(self.db.table_exists('simple_model'))
def test_table_not_exists(self):
self.assertFalse(self.db.table_exists('invalid_model'))
def test_model_exists(self):
self.assertTrue(self.db.model_exists(SimpleModel))
def test_model_not_exists(self):
self.assertFalse(self.db.model_exists(SimpleChildModel))
def test_get_model_meta(self):
model = self.db.get_model_meta('simple_model')
self.assertSetEqual(set(model.fields), {'model_id', 'integer_column', 'float_column'})
self.assertTrue(self.db.model_exists(model))
def test_create_model(self):
self.assertFalse(self.db.table_exists('simple_child_model'))
self.db.create_model(SimpleChildModel())
self.assertTrue(self.db.table_exists('simple_child_model'))
def test_insert_model(self):
model = SimpleModel(model_id='test_id',integer_column=100)
self.db.insert(model)
sleep(1)
cur = self.conn.cursor()
cur.execute('select model_id, integer_column, float_column from simple_model')
result = cur.fetchone()
self.assertEqual(result[0], 'test_id')
def test_create_model_passing_class(self):
self.assertFalse(self.db.model_exists(SimpleChildModel))
self.db.create_model(SimpleChildModel)
self.assertTrue(self.db.model_exists(SimpleChildModel))
def test_select_model_with_single_kwarg(self):
self._insert_empty_test_model()
results = self.db.select(SimpleModel, model_id='test_id')
self.assertEqual(1, len(results))
self.assertEqual(results[0]['model_id'],'test_id')
def test_select_model_with_multiple_kwargs(self):
self._insert_empty_test_model()
results = self.db.select(SimpleModel, model_id = 'test_id', integer_column=100)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['model_id'],'test_id')
def _insert_empty_test_model(self, model_id = 'test_id', integer_column=100, float_column=None):
self.conn.cursor().execute('insert into simple_model(model_id, integer_column, float_column) VALUES (%s, %s, %s)', [model_id, integer_column, float_column])
self.conn.commit()
def test_select_model_with_null(self):
self._insert_empty_test_model()
results = self.db.select(SimpleModel, float_column=None)
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['model_id'],'test_id')
def test_select_model_with_null_and_kwarg(self):
self._insert_empty_test_model()
self._insert_empty_test_model('test_id2',200,1.0)
results = self.db.select(SimpleModel, float_column=[None, 1.0])
returned_ids = sorted([x['model_id'] for x in results])
self.assertEqual(2, len(results))
self.assertEqual(['test_id','test_id2'], returned_ids)
def test_select_with_multiple_args_for_field(self):
self._insert_empty_test_model()
self._insert_empty_test_model('test_id2',200,1.0)
results = self.db.select(SimpleModel,model_id=['test_id','test_id2'])
returned_ids = sorted([x['model_id'] for x in results])
self.assertEqual(2, len(results))
self.assertEqual(['test_id','test_id2'], returned_ids)
def test_update_simple_model(self):
self._insert_empty_test_model('test_id2',200,None)
model = SimpleModel(model_id='test_id2',integer_column=200,float_column=1.0)
rows_affected = self.db.update(model)
self.assertEqual(1, rows_affected)
sleep(2)
cur = self.conn.cursor()
cur.execute('select float_column from simple_model')
results = cur.fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(1.0, results[0][0])
def test_delete_without_kwargs(self):
self._insert_empty_test_model()
cur = self.conn.cursor()
self.db.delete(SimpleModel,False)
sleep(1)
cur.execute('select * from simple_model')
results = cur.fetchall()
self.assertEqual(len(results), 1)
self.db.delete(SimpleModel,True)
sleep(1)
cur.execute('select * from simple_model')
results = cur.fetchall()
self.assertEqual(len(results), 0)
def test_delete_with_single_kwarg(self):
self._insert_empty_test_model()
self._insert_empty_test_model('test_id2',200,1.0)
cur = self.conn.cursor()
self.db.delete(SimpleModel, float_column=1.0)
sleep(4)
cur.execute('select * from simple_model')
self.assertEqual(1, len(cur.fetchall()))
def test_insert_and_select_date(self):
self.db.create_model(SpecialDate)
model = SpecialDate()
model['timestamp'] = datetime.now()
model['model_id'] = 'test_id'
self.db.insert(model)
sleep(2)
result = self.db.select(SpecialDate)
self.assertEqual(1, len(result))
result = result[0]
self.assertEqual(model['timestamp'], result['timestamp'])
def test_insert_and_select_datetime(self):
self.db.create_model(LogTimestamp)
model = LogTimestamp()
model['timestamp'] = datetime.now()
model['model_id'] = 'test_id'
self.db.insert(model)
sleep(2)
result = self.db.select(LogTimestamp)
self.assertEqual(1, len(result))
result = result[0]
self.assertEqual(model['timestamp'], result['timestamp'])
def test_insert_table_with_timestamp(self):
self.assertNotIn('log_timestamp_model', self.db.get_tables())
self.db.create_model(LogTimestamp)
self.assertIn('log_timestamp_model', self.db.get_tables())
|
test_windbgmon.py
|
import os
import threading
import win32api
import windbgmon
def test_dbgmon():
messages = []
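# collect (pid, message) pairs on a background thread until the monitor is stopped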
with windbgmon.DbgMon() as dbgmon:
def monitor():
for pid, msg in dbgmon:
messages.append((pid, msg))
thread = threading.Thread(target=monitor)
thread.start()
win32api.OutputDebugString("Hello, World!")
dbgmon.stop()
thread.join()
assert (os.getpid(), "Hello, World!") in messages
|
client3.py
|
import socket
import threading
from datetime import datetime
import traceback
import sys
import queue
class TCP_Nonblocking_Client:
def __init__(self, host, port, name):
self.host = host
self.port = port
self.sock = None
self.format = 'utf-8'
self.received_messages = queue.Queue()
self.stop_client = False
self.name = name
def print_tstamp(self, msg):
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f'[{current_time}] [CLIENT] {msg}')
def create_socket(self):
self.print_tstamp('Creating socket...')
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.print_tstamp(f'Socket created')
def connect_to_server(self):
try:
self.print_tstamp(f'Connecting to server [{self.host}] on port [{self.port}]...')
self.sock.connect((self.host, self.port))
self.print_tstamp(f'Connected to server [{self.host}] on port [{self.port}]')
except OSError:
# socket.error is an alias of OSError in Python 3, so a single handler covers both
self.stop_client = True
self.print_tstamp('Encountered an error:')
traceback.print_exc()
def send_message(self, msg):
try:
if msg:
send_info = self.sock.send(msg.encode(self.format))
self.print_tstamp(f'Sent {send_info} bytes to the server')
except OSError as err:
self.stop_client = True
self.print_tstamp('Encountered an error:')
traceback.print_exc()
def shutdown_socket(self):
self.print_tstamp('Closing socket...')
self.sock.close()
self.print_tstamp('Socket closed')
def read_message_loop(self):
# connect first; connect_to_server() sets stop_client on failure, in which case the receive loop is skipped
self.connect_to_server()
if self.stop_client:
return
while True:
try:
msg = self.sock.recv(1024).decode(self.format)
except socket.timeout:
self.print_tstamp('Socket timed out, retrying receive')
continue
except Exception:
self.print_tstamp('Encountered socket error:')
traceback.print_exc()
break
if msg == '':
# connection closed by peer, exit loop
self.print_tstamp('Connection closed by server')
break
self.print_tstamp(f'Received from [SERVER]: {msg}')
self.received_messages.put(msg)
self.shutdown_socket()
self.stop_client = True
def run_socket():
try:
tcp_client = TCP_Nonblocking_Client('139.162.172.141', 8080, 'Jeff')
tcp_client.create_socket()
thread = threading.Thread(target=tcp_client.read_message_loop)
thread.daemon = True
thread.start()
while True:
message = input()
if tcp_client.stop_client:
tcp_client.print_tstamp('Socket already closed, message not sent')
break
tcp_client.send_message(message)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
run_socket()
|
inspirobot.py
|
#!/usr/bin/python
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
"""
Variety quote plugin sourcing quotes from inspirobit.me
This script is placed in '~/.config/variety/plugins' and then activated from inside Variety's
Preferences Quotes menu
If the script fails, you may need to run: pip install requests
"""
import logging
import random
from threading import Thread
import requests
from locale import gettext as _
from variety.Util import Util
from variety.plugins.IQuoteSource import IQuoteSource
from io import BytesIO
logger = logging.getLogger("variety")
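# download the 1600x900 Unsplash image referenced by an inspirobot "transition" entry and save it into Variety's Fetched folder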
def fetchAndSaveImages(image_name):
r = requests.get('https://source.unsplash.com/' + image_name + '/1600x900')
if r.status_code == 200:
with open('.config/variety/Fetched/' + image_name + '.jpg', 'wb') as file:
image_bytes = BytesIO(r.content)
file.write(image_bytes.getvalue())
image_bytes.close()
else:
logger.warning(r.content)
class InspirobotSource(IQuoteSource):
"""
Retrieves quotes from inspirobot.me.
Attributes:
quotes(list): list containing the quotes
"""
def __init__(self):
super(IQuoteSource, self).__init__()
self.quotes = []
@classmethod
def get_info(cls):
return {
"name": "inspirobot",
"description": _("AI generated quotes from inspirobot.me"),
"author": "0rpheu",
"version": "0.1"
}
def supports_search(self):
return False
def activate(self):
if self.active:
return
self.active = True
def deactivate(self):
self.active = False
def fetch_inspirobot_quotes(self):
quotes = []
response = requests.get('https://inspirobot.me/api?generateFlow=1')
json_object = response.json()
for row in json_object['data']:
if row['type'] == "quote":
quotes.append(
{
"quote": row['text'],
"author": "",
"sourceName": "inspirobot.me",
"link": " https://inspirobot.me"
}
)
elif row['type'] == "transition":
logger.warning('thread to fetch image ' + 'https://source.unsplash.com/' + row['image'] + '/1600x900')
thread = Thread(target=fetchAndSaveImages, args=(row['image'],))
thread.start()
return quotes
def get_for_author(self, author):
return []
def get_for_keyword(self, keyword):
return []
def get_random(self):
return self.fetch_inspirobot_quotes()
|
run.py
|
import sys
import signal
import threading
import asyncio
import aiohttp
import conf_loader
import notifier
import bili_sched
import printer
import bili_statistics
from console_cmd import ConsoleCmd
from tasks.login import LoginTask
from tasks.live_daily_job import (
HeartBeatTask,
OpenSilverBoxTask,
RecvDailyBagTask,
SignTask,
WatchTvTask,
SignFansGroupsTask,
SendGiftTask,
ExchangeSilverCoinTask
)
from tasks.main_daily_job import (
JudgeCaseTask,
BiliMainTask,
DahuiyuanTask
)
from tasks.manga_daily_job import (
ShareComicTask,
MangaSignTask,
)
from tasks.utils import UtilsTask
# danmaku (live comment) monitoring
from danmu.bili_danmu_monitor import DanmuPrinter, DanmuRaffleMonitor
from danmu.yj_monitor import TcpYjMonitorClient
from danmu import raffle_handler
# physical-prize raffles
from substance.monitor_substance_raffle import SubstanceRaffleMonitor
from dyn.monitor_dyn_raffle import DynRaffleMonitor
loop = asyncio.get_event_loop()
dict_user = conf_loader.read_user()
dict_bili = conf_loader.read_bili()
dict_color = conf_loader.read_color()
dict_ctrl = conf_loader.read_ctrl()
dict_task = conf_loader.read_task()
printer.init_config(dict_color, dict_ctrl['print_control']['danmu'])
############################################################################
############################################################################
# 👇 user registration
async def init_users():
global_task_control = dict_task['global_task_control']
custom_task_control = dict_task['custom_task_control']
global_task_arrangement = dict_task['global_task_arrangement']
custom_task_arrangement = dict_task['custom_task_arrangement']
users = notifier.Users(global_task_control=global_task_control,
global_task_arrangement=global_task_arrangement,
dict_bili=dict_bili,
force_sleep=bili_sched.force_sleep)
notifier.init(users=users)
assert dict_user['users'] # this list must be non-empty: at least one user entry is required
for user_info in dict_user['users']:
username = user_info['username']
await notifier.add_user(user_info=user_info,
custom_task_control=custom_task_control.get(username, {}),
custom_task_arrangement=custom_task_arrangement.get(username, {}))
loop.run_until_complete(init_users())
############################################################################
############################################################################
# 👇 register recurring daily jobs
# intervals are in hours; the timer resets to zero after every sleep period and the interval is measured from the current time again
# the values below mean "run once every N hours"
def add_daily_jobs():
bili_sched.add_daily_jobs(HeartBeatTask, every_hours=6)
bili_sched.add_daily_jobs(OpenSilverBoxTask, every_hours=6)
bili_sched.add_daily_jobs(RecvDailyBagTask, every_hours=3)
bili_sched.add_daily_jobs(SignTask, every_hours=6)
bili_sched.add_daily_jobs(WatchTvTask, every_hours=6)
bili_sched.add_daily_jobs(SignFansGroupsTask, every_hours=6)
bili_sched.add_daily_jobs(SendGiftTask, every_hours=2)
bili_sched.add_daily_jobs(ExchangeSilverCoinTask, every_hours=6)
bili_sched.add_daily_jobs(JudgeCaseTask, every_hours=0.75)
bili_sched.add_daily_jobs(BiliMainTask, every_hours=4)
bili_sched.add_daily_jobs(MangaSignTask, every_hours=6)
bili_sched.add_daily_jobs(ShareComicTask, every_hours=6)
bili_sched.add_daily_jobs(DahuiyuanTask, every_hours=6)
add_daily_jobs()
############################################################################
############################################################################
loop.run_until_complete(notifier.exec_task(LoginTask))
other_control = dict_ctrl['other_control']
area_ids = loop.run_until_complete(notifier.exec_func(UtilsTask.fetch_blive_areas))
area_duplicated = other_control['area_duplicated']
if area_duplicated:
area_ids *= 2
bili_statistics.init(area_num=len(area_ids), area_duplicated=area_duplicated)
default_roomid = other_control['default_monitor_roomid']
############################################################################
############################################################################
# 👇 register monitors
# aiohttp client session
async def init_monitors():
session = aiohttp.ClientSession()
monitors_ = []
# danmaku printing
danmu_printer_ = DanmuPrinter(
room_id=default_roomid,
area_id=-1,
session=session)
# danmaku raffle monitors
for area_id in area_ids:
monitor = DanmuRaffleMonitor(
room_id=0,
area_id=area_id,
session=session)
monitors_.append(monitor)
# yjmonitor danmaku monitoring
yjmonitor_tcp_addr = other_control['yjmonitor_tcp_addr']
yjmonitor_tcp_key = other_control['yjmonitor_tcp_key']
if yjmonitor_tcp_key:
monitor = TcpYjMonitorClient(
key=yjmonitor_tcp_key,
url=yjmonitor_tcp_addr,
area_id=0)
monitors_.append(monitor)
if other_control['substance_raffle']:
monitors_.append(SubstanceRaffleMonitor())
if other_control['dyn_raffle']:
monitors_.append(DynRaffleMonitor(
should_join_immediately=other_control['join_dyn_raffle_at_once']))
return danmu_printer_, monitors_
danmu_printer, monitors = loop.run_until_complete(init_monitors())
############################################################################
############################################################################
bili_sched.init(monitors=monitors, sleep_ranges=dict_ctrl['other_control']['sleep_ranges'])
# initialize the interactive console
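# start the console only on non-Linux platforms or when SIGHUP is still at its default handler (e.g. the process is not running detached under nohup, which ignores SIGHUP)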
if sys.platform != 'linux' or signal.getsignal(signal.SIGHUP) == signal.SIG_DFL:
console_thread = threading.Thread(
target=ConsoleCmd(loop, default_roomid, danmu_printer).cmdloop)
console_thread.start()
else:
console_thread = None
tasks = [monitor.run() for monitor in monitors]
other_tasks = [
bili_sched.run(),
raffle_handler.run(),
danmu_printer.run()
]
if other_tasks:
loop.run_until_complete(asyncio.wait(tasks + other_tasks))
loop.run_forever()
if console_thread is not None:
console_thread.join()
|
test_backend.py
|
import base64
import copy
import datetime
import threading
import time
import unittest
from datetime import timedelta
from unittest.mock import Mock, patch
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.core.cache import DEFAULT_CACHE_ALIAS, cache, caches
from django.test import override_settings
from django.utils import timezone
from redis.exceptions import ConnectionError
import django_redis.cache
from django_redis import pool
from django_redis.client import DefaultClient, ShardClient, herd
from django_redis.serializers.json import JSONSerializer
from django_redis.serializers.msgpack import MSGPackSerializer
herd.CACHE_HERD_TIMEOUT = 2
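# shorten the herd timeout so that herd-related tests run quickly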
def make_key(key, prefix, version):
return f"{prefix}#{version}#{key}"
def reverse_key(key):
return key.split("#", 2)[2]
class DjangoRedisConnectionStrings(unittest.TestCase):
def test_connection_strings(self):
connection_strings = [
"unix://tmp/foo.bar?db=1",
"redis://localhost/2",
"rediss://localhost:3333?db=2",
]
cf = pool.get_connection_factory(options={})
for connection_string in connection_strings:
with self.subTest(connection_string):
res = cf.make_connection_params(connection_string)
self.assertEqual(res["url"], connection_string)
class DjangoRedisCacheTestEscapePrefix(unittest.TestCase):
def setUp(self):
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["default"]["KEY_PREFIX"] = "*"
cm = override_settings(CACHES=caches_setting)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["default"]
self.other = caches["with_prefix"]
def tearDown(self):
self.cache.clear()
self.other.clear()
def test_delete_pattern(self):
self.cache.set("a", "1")
self.other.set("b", "2")
self.cache.delete_pattern("*")
self.assertIs(self.cache.has_key("a"), False)
self.assertEqual(self.other.get("b"), "2")
def test_iter_keys(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support iter_keys")
self.cache.set("a", "1")
self.other.set("b", "2")
self.assertEqual(list(self.cache.iter_keys("*")), ["a"])
def test_keys(self):
self.cache.set("a", "1")
self.other.set("b", "2")
keys = self.cache.keys("*")
self.assertIn("a", keys)
self.assertNotIn("b", keys)
class DjangoRedisCacheTestCustomKeyFunction(unittest.TestCase):
def setUp(self):
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["default"]["KEY_FUNCTION"] = "test_backend.make_key"
caches_setting["default"]["REVERSE_KEY_FUNCTION"] = "test_backend.reverse_key"
cm = override_settings(CACHES=caches_setting)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["default"]
def tearDown(self):
self.cache.clear()
def test_custom_key_function(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support get_client")
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
res = self.cache.delete_pattern("*foo-a*")
self.assertTrue(bool(res))
keys = self.cache.keys("foo*")
self.assertEqual(set(keys), {"foo-bb", "foo-bc"})
# ensure our custom function was actually called
self.assertEqual(
{k.decode() for k in self.cache.client.get_client(write=False).keys("*")},
{"#1#foo-bc", "#1#foo-bb"},
)
class DjangoRedisCacheTests(unittest.TestCase):
def setUp(self):
self.cache = cache
def tearDown(self):
self.cache.clear()
def test_setnx(self):
# we should ensure there is no test_key_nx in redis
self.cache.delete("test_key_nx")
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
res = self.cache.set("test_key_nx", 1, nx=True)
self.assertTrue(res)
# a second set with nx=True must fail because the key already exists
res = self.cache.set("test_key_nx", 2, nx=True)
self.assertFalse(res)
res = self.cache.get("test_key_nx")
self.assertEqual(res, 1)
self.cache.delete("test_key_nx")
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
def test_setnx_timeout(self):
# test that timeout still works for nx=True
res = self.cache.set("test_key_nx", 1, timeout=2, nx=True)
self.assertTrue(res)
time.sleep(3)
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
# test that timeout will not affect key, if it was there
self.cache.set("test_key_nx", 1)
res = self.cache.set("test_key_nx", 2, timeout=2, nx=True)
self.assertFalse(res)
time.sleep(3)
res = self.cache.get("test_key_nx")
self.assertEqual(res, 1)
self.cache.delete("test_key_nx")
res = self.cache.get("test_key_nx")
self.assertIsNone(res)
def test_unicode_keys(self):
self.cache.set("ключ", "value")
res = self.cache.get("ключ")
self.assertEqual(res, "value")
def test_save_and_integer(self):
self.cache.set("test_key", 2)
res = self.cache.get("test_key", "Foo")
self.assertIsInstance(res, int)
self.assertEqual(res, 2)
def test_save_string(self):
self.cache.set("test_key", "hello" * 1000)
res = self.cache.get("test_key")
self.assertIsInstance(res, str)
self.assertEqual(res, "hello" * 1000)
self.cache.set("test_key", "2")
res = self.cache.get("test_key")
self.assertIsInstance(res, str)
self.assertEqual(res, "2")
def test_save_unicode(self):
self.cache.set("test_key", "heló")
res = self.cache.get("test_key")
self.assertIsInstance(res, str)
self.assertEqual(res, "heló")
def test_save_dict(self):
if isinstance(
self.cache.client._serializer, (JSONSerializer, MSGPackSerializer)
):
# JSONSerializer and MSGPackSerializer use the isoformat for
# datetimes.
now_dt = datetime.datetime.now().isoformat()
else:
now_dt = datetime.datetime.now()
test_dict = {"id": 1, "date": now_dt, "name": "Foo"}
self.cache.set("test_key", test_dict)
res = self.cache.get("test_key")
self.assertIsInstance(res, dict)
self.assertEqual(res["id"], 1)
self.assertEqual(res["name"], "Foo")
self.assertEqual(res["date"], now_dt)
def test_save_float(self):
float_val = 1.345620002
self.cache.set("test_key", float_val)
res = self.cache.get("test_key")
self.assertIsInstance(res, float)
self.assertEqual(res, float_val)
def test_timeout(self):
self.cache.set("test_key", 222, timeout=3)
time.sleep(4)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_timeout_0(self):
self.cache.set("test_key", 222, timeout=0)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_timeout_parameter_as_positional_argument(self):
self.cache.set("test_key", 222, -1)
res = self.cache.get("test_key")
self.assertIsNone(res)
self.cache.set("test_key", 222, 1)
res1 = self.cache.get("test_key")
time.sleep(2)
res2 = self.cache.get("test_key")
self.assertEqual(res1, 222)
self.assertIsNone(res2)
# nx=True should not overwrite expire of key already in db
self.cache.set("test_key", 222, None)
self.cache.set("test_key", 222, -1, nx=True)
res = self.cache.get("test_key")
self.assertEqual(res, 222)
def test_timeout_negative(self):
self.cache.set("test_key", 222, timeout=-1)
res = self.cache.get("test_key")
self.assertIsNone(res)
self.cache.set("test_key", 222, timeout=None)
self.cache.set("test_key", 222, timeout=-1)
res = self.cache.get("test_key")
self.assertIsNone(res)
# nx=True should not overwrite expire of key already in db
self.cache.set("test_key", 222, timeout=None)
self.cache.set("test_key", 222, timeout=-1, nx=True)
res = self.cache.get("test_key")
self.assertEqual(res, 222)
def test_timeout_tiny(self):
self.cache.set("test_key", 222, timeout=0.00001)
res = self.cache.get("test_key")
self.assertIn(res, (None, 222))
def test_set_add(self):
self.cache.set("add_key", "Initial value")
res = self.cache.add("add_key", "New value")
self.assertIs(res, False)
res = cache.get("add_key")
self.assertEqual(res, "Initial value")
res = self.cache.add("other_key", "New value")
self.assertIs(res, True)
def test_get_many(self):
self.cache.set("a", 1)
self.cache.set("b", 2)
self.cache.set("c", 3)
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"a": 1, "b": 2, "c": 3})
def test_get_many_unicode(self):
self.cache.set("a", "1")
self.cache.set("b", "2")
self.cache.set("c", "3")
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"a": "1", "b": "2", "c": "3"})
def test_set_many(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"a": 1, "b": 2, "c": 3})
def test_set_call_empty_pipeline(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support get_client")
pipeline = self.cache.client.get_client(write=True).pipeline()
key = "key"
value = "value"
with patch.object(pipeline, "set") as mocked_set:
self.cache.set(key, value, client=pipeline)
if isinstance(self.cache.client, herd.HerdClient):
default_timeout = self.cache.client._backend.default_timeout
herd_timeout = (default_timeout + herd.CACHE_HERD_TIMEOUT) * 1000
herd_pack_value = self.cache.client._pack(value, default_timeout)
mocked_set.assert_called_once_with(
self.cache.client.make_key(key, version=None),
self.cache.client.encode(herd_pack_value),
nx=False,
px=herd_timeout,
xx=False,
)
else:
mocked_set.assert_called_once_with(
self.cache.client.make_key(key, version=None),
self.cache.client.encode(value),
nx=False,
px=self.cache.client._backend.default_timeout * 1000,
xx=False,
)
def test_delete(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.delete("a")
self.assertTrue(bool(res))
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"b": 2, "c": 3})
res = self.cache.delete("a")
self.assertFalse(bool(res))
@patch("django_redis.cache.DJANGO_VERSION", (3, 1, 0, "final", 0))
def test_delete_return_value_type_new31(self):
"""delete() returns a boolean instead of int since django version 3.1"""
self.cache.set("a", 1)
res = self.cache.delete("a")
self.assertEqual(type(res), bool)
self.assertTrue(res)
res = self.cache.delete("b")
self.assertEqual(type(res), bool)
self.assertFalse(res)
@patch("django_redis.cache.DJANGO_VERSION", (3, 0, 1, "final", 0))
def test_delete_return_value_type_before31(self):
"""delete() returns a int before django version 3.1"""
self.cache.set("a", 1)
res = self.cache.delete("a")
self.assertEqual(type(res), int)
self.assertEqual(res, 1)
res = self.cache.delete("b")
self.assertEqual(type(res), int)
self.assertEqual(res, 0)
def test_delete_many(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.delete_many(["a", "b"])
self.assertTrue(bool(res))
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"c": 3})
res = self.cache.delete_many(["a", "b"])
self.assertFalse(bool(res))
def test_delete_many_generator(self):
self.cache.set_many({"a": 1, "b": 2, "c": 3})
res = self.cache.delete_many(key for key in ["a", "b"])
self.assertTrue(bool(res))
res = self.cache.get_many(["a", "b", "c"])
self.assertEqual(res, {"c": 3})
res = self.cache.delete_many(["a", "b"])
self.assertFalse(bool(res))
def test_delete_many_empty_generator(self):
res = self.cache.delete_many(key for key in [])
self.assertFalse(bool(res))
def test_incr(self):
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support incr")
self.cache.set("num", 1)
self.cache.incr("num")
res = self.cache.get("num")
self.assertEqual(res, 2)
self.cache.incr("num", 10)
res = self.cache.get("num")
self.assertEqual(res, 12)
# max 64 bit signed int
self.cache.set("num", 9223372036854775807)
self.cache.incr("num")
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775808)
self.cache.incr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775810)
self.cache.set("num", 3)
self.cache.incr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, 5)
def test_incr_error(self):
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support incr")
with self.assertRaises(ValueError):
# key does not exist
self.cache.incr("numnum")
def test_incr_ignore_check(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest(
"ShardClient doesn't support argument ignore_key_check to incr"
)
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support incr")
# key exists check will be skipped and the value will be incremented by
# '1' which is the default delta
self.cache.incr("num", ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 1)
self.cache.delete("num")
        # since the key doesn't exist, it is set to the delta value, 10 in this case
self.cache.incr("num", 10, ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 10)
self.cache.delete("num")
        # the following are regression checks to make sure incr still works as
        # expected around the max 64-bit signed int
self.cache.set("num", 9223372036854775807)
self.cache.incr("num", ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775808)
self.cache.incr("num", 2, ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775810)
self.cache.set("num", 3)
self.cache.incr("num", 2, ignore_key_check=True)
res = self.cache.get("num")
self.assertEqual(res, 5)
def test_get_set_bool(self):
self.cache.set("bool", True)
res = self.cache.get("bool")
self.assertIsInstance(res, bool)
self.assertIs(res, True)
self.cache.set("bool", False)
res = self.cache.get("bool")
self.assertIsInstance(res, bool)
self.assertIs(res, False)
def test_decr(self):
if isinstance(self.cache.client, herd.HerdClient):
self.skipTest("HerdClient doesn't support decr")
self.cache.set("num", 20)
self.cache.decr("num")
res = self.cache.get("num")
self.assertEqual(res, 19)
self.cache.decr("num", 20)
res = self.cache.get("num")
self.assertEqual(res, -1)
self.cache.decr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, -3)
self.cache.set("num", 20)
self.cache.decr("num")
res = self.cache.get("num")
self.assertEqual(res, 19)
# max 64 bit signed int + 1
self.cache.set("num", 9223372036854775808)
self.cache.decr("num")
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775807)
self.cache.decr("num", 2)
res = self.cache.get("num")
self.assertEqual(res, 9223372036854775805)
def test_version(self):
self.cache.set("keytest", 2, version=2)
res = self.cache.get("keytest")
self.assertIsNone(res)
res = self.cache.get("keytest", version=2)
self.assertEqual(res, 2)
def test_incr_version(self):
self.cache.set("keytest", 2)
self.cache.incr_version("keytest")
res = self.cache.get("keytest")
self.assertIsNone(res)
res = self.cache.get("keytest", version=2)
self.assertEqual(res, 2)
def test_delete_pattern(self):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
res = self.cache.delete_pattern("*foo-a*")
self.assertTrue(bool(res))
keys = self.cache.keys("foo*")
self.assertEqual(set(keys), {"foo-bb", "foo-bc"})
res = self.cache.delete_pattern("*foo-a*")
self.assertFalse(bool(res))
@patch("django_redis.cache.RedisCache.client")
def test_delete_pattern_with_custom_count(self, client_mock):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
self.cache.delete_pattern("*foo-a*", itersize=2)
client_mock.delete_pattern.assert_called_once_with("*foo-a*", itersize=2)
@patch("django_redis.cache.RedisCache.client")
def test_delete_pattern_with_settings_default_scan_count(self, client_mock):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
self.cache.set(key, "foo")
expected_count = django_redis.cache.DJANGO_REDIS_SCAN_ITERSIZE
self.cache.delete_pattern("*foo-a*")
client_mock.delete_pattern.assert_called_once_with(
"*foo-a*", itersize=expected_count
)
@override_settings(DJANGO_REDIS_CLOSE_CONNECTION=True)
def test_close(self):
self.cache.set("f", "1")
self.cache.close()
def test_close_client(self):
with patch.object(self.cache.client, "close") as mock:
self.cache.close()
assert mock.called
def test_ttl(self):
cache = caches["default"]
# Test ttl
cache.set("foo", "bar", 10)
ttl = cache.ttl("foo")
if isinstance(cache.client, herd.HerdClient):
self.assertAlmostEqual(ttl, 12)
else:
self.assertAlmostEqual(ttl, 10)
# Test ttl None
cache.set("foo", "foo", timeout=None)
ttl = cache.ttl("foo")
self.assertIsNone(ttl)
# Test ttl with expired key
cache.set("foo", "foo", timeout=-1)
ttl = cache.ttl("foo")
self.assertEqual(ttl, 0)
# Test ttl with not existent key
ttl = cache.ttl("not-existent-key")
self.assertEqual(ttl, 0)
def test_persist(self):
self.cache.set("foo", "bar", timeout=20)
self.cache.persist("foo")
ttl = self.cache.ttl("foo")
self.assertIsNone(ttl)
def test_expire(self):
self.cache.set("foo", "bar", timeout=None)
self.cache.expire("foo", 20)
ttl = self.cache.ttl("foo")
self.assertAlmostEqual(ttl, 20)
def test_lock(self):
lock = self.cache.lock("foobar")
lock.acquire(blocking=True)
self.assertTrue(self.cache.has_key("foobar"))
lock.release()
self.assertFalse(self.cache.has_key("foobar"))
def test_lock_released_by_thread(self):
lock = self.cache.lock("foobar", thread_local=False)
lock.acquire(blocking=True)
def release_lock(lock_):
lock_.release()
t = threading.Thread(target=release_lock, args=[lock])
t.start()
t.join()
self.assertFalse(self.cache.has_key("foobar"))
def test_iter_keys(self):
cache = caches["default"]
if isinstance(cache.client, ShardClient):
self.skipTest("ShardClient doesn't support iter_keys")
cache.set("foo1", 1)
cache.set("foo2", 1)
cache.set("foo3", 1)
# Test simple result
result = set(cache.iter_keys("foo*"))
self.assertEqual(result, {"foo1", "foo2", "foo3"})
# Test limited result
result = list(cache.iter_keys("foo*", itersize=2))
self.assertEqual(len(result), 3)
# Test generator object
result = cache.iter_keys("foo*")
self.assertNotEqual(next(result), None)
def test_primary_replica_switching(self):
if isinstance(self.cache.client, ShardClient):
self.skipTest("ShardClient doesn't support get_client")
cache = caches["sample"]
client = cache.client
client._server = ["foo", "bar"]
client._clients = ["Foo", "Bar"]
self.assertEqual(client.get_client(write=True), "Foo")
self.assertEqual(client.get_client(write=False), "Bar")
def test_touch_zero_timeout(self):
self.cache.set("test_key", 222, timeout=10)
self.assertIs(self.cache.touch("test_key", 0), True)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_touch_positive_timeout(self):
self.cache.set("test_key", 222, timeout=10)
self.assertIs(self.cache.touch("test_key", 2), True)
self.assertEqual(self.cache.get("test_key"), 222)
time.sleep(3)
self.assertIsNone(self.cache.get("test_key"))
def test_touch_negative_timeout(self):
self.cache.set("test_key", 222, timeout=10)
self.assertIs(self.cache.touch("test_key", -1), True)
res = self.cache.get("test_key")
self.assertIsNone(res)
def test_touch_missed_key(self):
self.assertIs(self.cache.touch("test_key_does_not_exist", 1), False)
def test_touch_forever(self):
self.cache.set("test_key", "foo", timeout=1)
result = self.cache.touch("test_key", None)
self.assertIs(result, True)
self.assertIsNone(self.cache.ttl("test_key"))
time.sleep(2)
self.assertEqual(self.cache.get("test_key"), "foo")
def test_touch_forever_nonexistent(self):
result = self.cache.touch("test_key_does_not_exist", None)
self.assertIs(result, False)
def test_touch_default_timeout(self):
self.cache.set("test_key", "foo", timeout=1)
result = self.cache.touch("test_key")
self.assertIs(result, True)
time.sleep(2)
self.assertEqual(self.cache.get("test_key"), "foo")
def test_clear(self):
self.cache.set("foo", "bar")
value_from_cache = self.cache.get("foo")
self.assertEqual(value_from_cache, "bar")
self.cache.clear()
value_from_cache_after_clear = self.cache.get("foo")
self.assertIsNone(value_from_cache_after_clear)
class DjangoOmitExceptionsTests(unittest.TestCase):
def setUp(self):
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["doesnotexist"]["OPTIONS"]["IGNORE_EXCEPTIONS"] = True
cm = override_settings(
CACHES=caches_setting, DJANGO_REDIS_IGNORE_EXCEPTIONS=True
)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["doesnotexist"]
def test_get_many_returns_default_arg(self):
self.assertIs(self.cache._ignore_exceptions, True)
self.assertEqual(self.cache.get_many(["key1", "key2", "key3"]), {})
def test_get(self):
self.assertIs(self.cache._ignore_exceptions, True)
self.assertIsNone(self.cache.get("key"))
self.assertEqual(self.cache.get("key", "default"), "default")
self.assertEqual(self.cache.get("key", default="default"), "default")
class DjangoOmitExceptionsPriority1Tests(unittest.TestCase):
def setUp(self):
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["doesnotexist"]["OPTIONS"]["IGNORE_EXCEPTIONS"] = True
cm = override_settings(
CACHES=caches_setting, DJANGO_REDIS_IGNORE_EXCEPTIONS=False
)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["doesnotexist"]
def test_get(self):
self.assertIs(self.cache._ignore_exceptions, True)
self.assertIsNone(self.cache.get("key"))
class DjangoOmitExceptionsPriority2Tests(unittest.TestCase):
def setUp(self):
caches_setting = copy.deepcopy(settings.CACHES)
caches_setting["doesnotexist"]["OPTIONS"]["IGNORE_EXCEPTIONS"] = False
cm = override_settings(
CACHES=caches_setting, DJANGO_REDIS_IGNORE_EXCEPTIONS=True
)
cm.enable()
self.addCleanup(cm.disable)
self.cache = caches["doesnotexist"]
def test_get(self):
self.assertIs(self.cache._ignore_exceptions, False)
with self.assertRaises(ConnectionError):
self.cache.get("key")
# Copied from Django's sessions test suite. Keep in sync with upstream.
# https://github.com/django/django/blob/main/tests/sessions_tests/tests.py
class SessionTestsMixin:
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertIs(self.session.modified, False)
self.assertIs(self.session.accessed, False)
def test_get_empty(self):
self.assertIsNone(self.session.get("cat"))
def test_store(self):
self.session["cat"] = "dog"
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.pop("cat"), "dog")
def test_pop(self):
self.session["some key"] = "exists"
# Need to reset these to pretend we haven't accessed it:
self.accessed = False
self.modified = False
self.assertEqual(self.session.pop("some key"), "exists")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertIsNone(self.session.get("some key"))
def test_pop_default(self):
self.assertEqual(
self.session.pop("some key", "does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_default_named_argument(self):
self.assertEqual(
self.session.pop("some key", default="does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_no_default_keyerror_raised(self):
with self.assertRaises(KeyError):
self.session.pop("some key")
def test_setdefault(self):
self.assertEqual(self.session.setdefault("foo", "bar"), "bar")
self.assertEqual(self.session.setdefault("foo", "baz"), "bar")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_update(self):
self.session.update({"update key": 1})
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.get("update key"), 1)
def test_has_key(self):
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn("some key", self.session)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertIs(self.session.accessed, True)
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.values()), [1])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_keys(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.keys()), ["x"])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_items(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_clear(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_save(self):
self.session.save()
self.assertIs(self.session.exists(self.session.session_key), True)
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertIs(self.session.exists(self.session.session_key), False)
def test_flush(self):
self.session["foo"] = "bar"
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertIs(self.session.modified, True)
self.assertIs(self.session.accessed, True)
def test_cycle(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_cycle_with_no_session_cache(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_data = self.session.items()
self.session = self.backend(self.session.session_key)
self.assertIs(hasattr(self.session, "_session_cache"), False)
self.session.cycle_key()
self.assertCountEqual(self.session.items(), prev_data)
def test_save_doesnt_clear_data(self):
self.session["a"] = "b"
self.session.save()
self.assertEqual(self.session["a"], "b")
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend("1")
session.save()
self.assertNotEqual(session.session_key, "1")
self.assertIsNone(session.get("cat"))
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete("1")
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ""
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = "1234567"
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = "12345678"
self.assertEqual(self.session.session_key, "12345678")
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
with self.assertRaises(AttributeError):
set_session_key(self.session)
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), False)
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), True)
def test_decode(self):
# Ensure we can decode what we encode
data = {"a test key": "a test value"}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b"flaskdj:alkdjf").decode("ascii")
with self.assertLogs("django.security.SuspiciousSession", "WARNING") as cm:
self.assertEqual({}, self.session.decode(bad_encode))
# The failed decode is logged.
self.assertIn("corrupted", cm.output[0])
def test_actual_expiry(self):
# this doesn't work with JSONSerializer (serializing timedelta)
with override_settings(
SESSION_SERIALIZER="django.contrib.sessions.serializers.PickleSerializer"
):
self.session = self.backend() # reinitialize after overriding settings
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session["foo"] = "bar"
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn("foo", new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
session = self.backend("someunknownkey")
session.load()
self.assertIsNone(session.session_key)
self.assertIs(session.exists(session.session_key), False)
# provided unknown key was cycled, not reused
self.assertNotEqual(session.session_key, "someunknownkey")
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
"""
Sessions shouldn't be resurrected by a concurrent request.
"""
from django.contrib.sessions.backends.base import UpdateError
# Create new session.
s1 = self.backend()
s1["test_data"] = "value1"
s1.save(must_create=True)
# Logout in another context.
s2 = self.backend(s1.session_key)
s2.delete()
# Modify session in first context.
s1["test_data"] = "value2"
with self.assertRaises(UpdateError):
# This should throw an exception as the session is deleted, not
# resurrect the session.
s1.save()
self.assertEqual(s1.load(), {})
class SessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
def test_actual_expiry(self):
if isinstance(
caches[DEFAULT_CACHE_ALIAS].client._serializer, MSGPackSerializer
):
self.skipTest("msgpack serializer doesn't support datetime serialization")
super().test_actual_expiry()
class TestClientClose(unittest.TestCase):
def setUp(self):
self.client = caches[DEFAULT_CACHE_ALIAS].client
self.client.set("TestClientClose", 0)
def tearDown(self):
self.client.delete("TestClientClose")
self.client.clear()
def test_close_client_disconnect_default(self):
with patch.object(self.client.connection_factory, "disconnect") as mock:
self.client.close()
assert not mock.called
@override_settings(DJANGO_REDIS_CLOSE_CONNECTION=True)
def test_close_disconnect_settings(self):
with patch.object(self.client.connection_factory, "disconnect") as mock:
self.client.close()
assert mock.called
def test_close_disconnect_settings_cache(self):
settings.CACHES[DEFAULT_CACHE_ALIAS]["OPTIONS"]["CLOSE_CONNECTION"] = True
with override_settings(CACHES=settings.CACHES):
# enabling override_settings context emits 'setting_changed' signal
# (re-set the value to populate again client connections)
self.client.set("TestClientClose", 0)
with patch.object(self.client.connection_factory, "disconnect") as mock:
self.client.close()
assert mock.called
def test_close_disconnect_client_options(self):
self.client._options["CLOSE_CONNECTION"] = True
with patch.object(self.client.connection_factory, "disconnect") as mock:
self.client.close()
assert mock.called
class TestDefaultClient(unittest.TestCase):
@patch("test_backend.DefaultClient.get_client")
@patch("test_backend.DefaultClient.__init__", return_value=None)
def test_delete_pattern_calls_get_client_given_no_client(
self, init_mock, get_client_mock
):
client = DefaultClient()
client._backend = Mock()
client._backend.key_prefix = ""
client.delete_pattern(pattern="foo*")
get_client_mock.assert_called_once_with(write=True)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.DefaultClient.get_client", return_value=Mock())
@patch("test_backend.DefaultClient.__init__", return_value=None)
def test_delete_pattern_calls_make_pattern(
self, init_mock, get_client_mock, make_pattern_mock
):
client = DefaultClient()
client._backend = Mock()
client._backend.key_prefix = ""
get_client_mock.return_value.scan_iter.return_value = []
client.delete_pattern(pattern="foo*")
kwargs = {"version": None, "prefix": None}
# if not isinstance(caches['default'].client, ShardClient):
# kwargs['prefix'] = None
make_pattern_mock.assert_called_once_with("foo*", **kwargs)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.DefaultClient.get_client", return_value=Mock())
@patch("test_backend.DefaultClient.__init__", return_value=None)
def test_delete_pattern_calls_scan_iter_with_count_if_itersize_given(
self, init_mock, get_client_mock, make_pattern_mock
):
client = DefaultClient()
client._backend = Mock()
client._backend.key_prefix = ""
get_client_mock.return_value.scan_iter.return_value = []
client.delete_pattern(pattern="foo*", itersize=90210)
get_client_mock.return_value.scan_iter.assert_called_once_with(
count=90210, match=make_pattern_mock.return_value
)
class TestShardClient(unittest.TestCase):
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.ShardClient.__init__", return_value=None)
def test_delete_pattern_calls_scan_iter_with_count_if_itersize_given(
self, init_mock, make_pattern_mock
):
client = ShardClient()
client._backend = Mock()
client._backend.key_prefix = ""
connection = Mock()
connection.scan_iter.return_value = []
client._serverdict = {"test": connection}
client.delete_pattern(pattern="foo*", itersize=10)
connection.scan_iter.assert_called_once_with(
count=10, match=make_pattern_mock.return_value
)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.ShardClient.__init__", return_value=None)
def test_delete_pattern_calls_scan_iter(self, init_mock, make_pattern_mock):
client = ShardClient()
client._backend = Mock()
client._backend.key_prefix = ""
connection = Mock()
connection.scan_iter.return_value = []
client._serverdict = {"test": connection}
client.delete_pattern(pattern="foo*")
connection.scan_iter.assert_called_once_with(
match=make_pattern_mock.return_value
)
@patch("test_backend.DefaultClient.make_pattern")
@patch("test_backend.ShardClient.__init__", return_value=None)
def test_delete_pattern_calls_delete_for_given_keys(
self, init_mock, make_pattern_mock
):
client = ShardClient()
client._backend = Mock()
client._backend.key_prefix = ""
connection = Mock()
connection.scan_iter.return_value = [Mock(), Mock()]
connection.delete.return_value = 0
client._serverdict = {"test": connection}
client.delete_pattern(pattern="foo*")
connection.delete.assert_called_once_with(*connection.scan_iter.return_value)
|
crawling_summary.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# +++++++++++++++++++++++++++++++++++++
# Final crawler!!!
# +++++++++++++++++++++++++++++++++++++
from time import sleep
from bs4 import BeautifulSoup
from multiprocessing import Process, Queue
import os
import platform
import calendar
import requests
import re
import datetime
import csv
import json
from lexrankr import LexRank
from apscheduler.schedulers.background import BackgroundScheduler
import time
# exception
class ResponseTimeout(Exception):
def __init__(self):
self.message = "Couldn't get the data"
def __str__(self):
return str(self.message)
#article parser
class ArticleParser(object):
special_symbol = re.compile('[\{\}\[\]\/?,;:|\)*~`!^\-_+<>@\#$&▲▶◆◀■【】\\\=\(\'\"]')
content_pattern = re.compile('본문 내용|TV플레이어| 동영상 뉴스|flash 오류를 우회하기 위한 함수 추가function flash removeCallback|tt|앵커 멘트|xa0')
@classmethod
def clear_content(cls, text):
        # Strip unnecessary special characters and boilerplate from the article body
newline_symbol_removed_text = text.replace('\\n', '').replace('\\t', '').replace('\\r', '')
special_symbol_removed_content = re.sub(cls.special_symbol, ' ', newline_symbol_removed_text)
end_phrase_removed_content = re.sub(cls.content_pattern, '', special_symbol_removed_content)
        blank_removed_content = re.sub(' +', ' ', end_phrase_removed_content).lstrip()  # collapse extra whitespace
        reversed_content = ''.join(reversed(blank_removed_content))  # reverse the article text
content = ''
for i in range(0, len(blank_removed_content)):
            # In the reversed text, '.다' marks the end of the last sentence of the body, so everything after it in the original (ads, reporter info, etc.) is dropped
if reversed_content[i:i + 2] == '.다':
content = ''.join(reversed(reversed_content[i:]))
break
return content
@classmethod
def clear_headline(cls, text):
        # Strip unnecessary special characters from the headline
newline_symbol_removed_text = text.replace('\\n', '').replace('\\t', '').replace('\\r', '')
special_symbol_removed_headline = re.sub(cls.special_symbol, '', newline_symbol_removed_text)
return special_symbol_removed_headline
@classmethod
def find_news_totalpage(cls, url):
        # Find the total number of list pages for the given day
try:
            totalpage_url = url
            request_content = requests.get(totalpage_url)
document_content = BeautifulSoup(request_content.content, 'html.parser')
headline_tag = document_content.find('div', {'class': 'paging'}).find('strong')
regex = re.compile(r'<strong>(?P<num>\d+)')
match = regex.findall(str(headline_tag))
return int(match[0])
except Exception:
return 0
#article crawler
class ArticleCrawler(object):
def __init__(self):
self.date = {'date': 0, 'time': 0}
self.user_operating_system = str(platform.system())
@staticmethod
def make_news_page_url(category_url, date):
made_urls = []
        year, month, day = date.split('-')
url = category_url + year + month + day
        # totalpage is found by exploiting Naver's paging: requesting page=10000,
        # which does not exist, redirects to the last page, i.e. the total page count
totalpage = ArticleParser.find_news_totalpage(url + "&page=10000")
for page in range(1, totalpage + 1):
made_urls.append(url + "&page=" + str(page))
return made_urls
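    # For illustration (hypothetical values): given the politics list URL built in crawling()
    # and the date '2020-01-02', the generated URLs look like
    # "...list.nhn?mode=LSD&mid=sec&sid1=100&date=20200102&page=1", one entry per page.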
@staticmethod
def get_url_data(url, max_tries=10):
remaining_tries = int(max_tries)
while remaining_tries > 0:
try:
return requests.get(url)
            except requests.exceptions.RequestException:
sleep(60)
remaining_tries = remaining_tries - 1
raise ResponseTimeout()
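    # Behaviour sketch: get_url_data retries up to max_tries times, sleeping 60 seconds
    # after each failed request, and raises ResponseTimeout once the retries are exhausted.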
def crawling(self, category_name,q):
global old
self.categories = {'정치': 100, '경제': 101, '사회': 102, '생활문화': 103, '세계': 104, 'IT과학': 105, '오피니언': 110,
'politics': 100, 'economy': 101, 'society': 102, 'living_culture': 103, 'world': 104,
'IT_science': 105, 'opinion': 110}
# Multi Process PID
count = 0
print(category_name + " PID: " + str(os.getpid()))
        # define the crawl date
now = str(datetime.datetime.now()).split()
self.date['date'] = now[0]
self.date['time'] = now[1]
        # article list URL format (sid1 is the category id, date is the date)
url = "http://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=" + str(self.categories.get(category_name)) + "&date="
        # Collect today's articles [later: crawl down to hourly granularity]
day_urls = self.make_news_page_url(url, self.date['date'])
print(category_name + " Urls are generated")
print("The crawler starts")
print(old)
for URL in day_urls:
regex = re.compile("date=(\d+)")
news_date = regex.findall(URL)[0]
request = self.get_url_data(URL)
document = BeautifulSoup(request.content, 'html.parser')
# html - newsflash_body - type06_headline, type06
            # fetch the article entries on each page
post_temp = document.select('.newsflash_body .type06_headline li dl')
post_temp.extend(document.select('.newsflash_body .type06 li dl'))
            # collect the URLs of the articles on this page
post = []
for line in post_temp:
                post.append(line.a.get('href'))  # append every article URL on this page to the post list
del post_temp
self.new = post[0]
            for content_url in post:  # article URL
                # crawl wait time
if category_name == "economy":
if content_url == old[0]:
old[0] = self.new
return
if category_name == "IT_science":
if content_url == old[1]:
old[1] = self.new
return
if category_name == "society":
if content_url == old[2]:
old[2] = self.new
return
if category_name == "politics":
if content_url == old[3]:
old[3] = self.new
return
sleep(0.01)
                # fetch the article HTML
request_content = self.get_url_data(content_url)
try:
document_content = BeautifulSoup(request_content.content, 'html.parser')
except:
continue
try:
                    # get the article headline
tag_headline = document_content.find_all('h3', {'id': 'articleTitle'}, {'class': 'tts_head'})
                    text_headline = ''  # initialize the headline
text_headline = text_headline + ArticleParser.clear_headline(
str(tag_headline[0].find_all(text=True)))
                    if not text_headline:  # skip the article if the headline is blank
continue
                    # get the article body
tag_content = document_content.find_all('div', {'id': 'articleBodyContents'})
                    text_sentence = ''  # initialize the body text
text_sentence = text_sentence + ArticleParser.clear_content(str(tag_content[0].find_all(text=True)))
                    # if not text_sentence:  # skip the article if the body is blank
# continue
if len(text_sentence.split('. ')) < 5:
continue
                    # get the news outlet
tag_company = document_content.find_all('meta', {'property': 'me2:category1'})
                    text_company = ''  # initialize the news outlet
text_company = text_company + str(tag_company[0].get('content'))
                    if not text_company:  # skip the article if the outlet is blank
continue
                    # get the article's published time
tag_date = document_content.find_all('span', {'class': 't11'})
tag_date=re.sub('<.+?>','',(str(tag_date[0]))).strip()
                    text_date = ''  # initialize the date string
text_date = text_date + tag_date
tag_date_datetime = text_date.split()[0].split('.')
tag_date_datetime = '-'.join(
[tag_date_datetime[i] for i in range(len(tag_date_datetime) - 1)]) + " " + text_date.split()[2]
tag_date_datetime = datetime.datetime.strptime(tag_date_datetime, '%Y-%m-%d %H:%M')
if text_date.split()[1] == '오후':
tag_date_datetime += datetime.timedelta(hours=12)
if not text_date:
continue
resultdata = [news_date, category_name, text_company, text_headline, text_sentence, content_url, tag_date_datetime]
#print("c",resultdata)
q.put(resultdata)
count += 1
#print("put{}!".format(count))
del text_company, text_sentence, text_headline
del tag_company
del tag_content, tag_headline
del request_content, document_content
except Exception as ex: # UnicodeEncodeError ..
# wcsv.writerow([ex, content_url])
del request_content, document_content
pass
def start(self):
        # Start multiprocess crawling (one process per category)
for category_name in self.selected_categories:
proc = Process(target=self.crawling, args=(category_name,q,))
proc.start()
count = 0
def smry(q):
while True:
try:
print("smry start")
global count
data = q.get()
count += 1
#print("get{}!".format(count))
lexrank = LexRank()
            lexrank.summarize(data[4])  # summarize data[4] (the article body)
            summaries = lexrank.probe(3)  # 3-sentence summary; summaries is a list
            data[4] = '. '.join(summaries) + '.'  # re-join the summary sentences with '. ' and store
            print(data)  # final result to be stored in the DB
db_store(data)
# for summary in summaries:
# print(summary)
        except (IndexError, ValueError, AttributeError):
            pass
        # handle the cases where malformed input yields no summary, or the input comes in as None
def db_store(data):
URL = "http://34.84.147.192:8000/news/articles/"
data = {
"headline": data[3],
"summary": data[4],
"url": data[5],
"pub_date": data[6], # Shuld be in datetime format
"category": data[1], # This is news category
"cluster_id": "1f4f3d79-192a-409c-b824-091ae97bfccd", # This is Default cluster ID for null value
}
try:
res = requests.post(url=URL,
                            data=data).json()  # POST the data to the database; the returned record is converted to JSON
print(res['news_id']) # This will show the newly created news id
res = requests.get(url=URL)
except Exception as e:
print(e)
if __name__ == "__main__":
q = Queue()
    #### Crawler ####
Crawler = ArticleCrawler()
    # #### Control the crawler with the scheduler ####
sched = BackgroundScheduler()
sched.start()
old=[]
category={"economy":'https://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=101&oid=005&aid=0001260441',"IT_science":'https://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=105&oid=020&aid=0003254067'
,'society':'https://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=102&oid=005&aid=0001260447','politics':'https://news.naver.com/main/read.nhn?mode=LSD&mid=sec&sid1=100&oid=005&aid=0001260414'}
    ## intervals: economy 5 min / society 3 min / politics 7 min / IT 15 min.
if "economy" in category:
old.append(category["economy"])
        sched.add_job(Crawler.crawling, 'interval', seconds=180, id='test_1', args=["economy", q])  # args must be passed as a list
if "IT_science" in category:
old.append(category["IT_science"])
        sched.add_job(Crawler.crawling, 'interval', seconds=900, id='test_2', args=["IT_science", q])  # args must be passed as a list
if "society" in category:
old.append(category["society"],)
sched.add_job(Crawler.crawling, 'interval', seconds=180, id='test_3', args=["society", q]) # argssms 배열로 넣어주어야한다.
if "politics" in category:
old.append(category["politics"])
        sched.add_job(Crawler.crawling, 'interval', seconds=420, id='test_4', args=["politics", q])  # args must be passed as a list
    #### Summarizer ####
process_summary = Process(target=smry, args=(q,))
process_summary.start()
while True:
print("running!!")
time.sleep(1)
#Crawler.start()
q.close()
q.join_thread()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,no-self-use,too-many-locals,line-too-long,unused-argument
import errno
try:
import msvcrt
except ImportError:
# Not supported for Linux machines.
pass
import platform
import select
import shlex
import signal
import sys
import threading
import time
try:
import termios
import tty
except ImportError:
# Not supported for Windows machines.
pass
import websocket
import yaml
from knack.log import get_logger
from knack.prompting import prompt_pass, prompt, NoTTYException
from knack.util import CLIError
from azure.mgmt.containerinstance.models import (AzureFileVolume, Container, ContainerGroup, ContainerGroupNetworkProtocol,
ContainerPort, ImageRegistryCredential, IpAddress, Port, ResourceRequests,
ResourceRequirements, Volume, VolumeMount, ContainerExecRequestTerminalSize,
GitRepoVolume, LogAnalytics, ContainerGroupDiagnostics, ContainerGroupNetworkProfile,
ContainerGroupIpAddressType, ResourceIdentityType, ContainerGroupIdentity)
from azure.cli.core.util import sdk_no_wait
from ._client_factory import (cf_container_groups, cf_container, cf_log_analytics_workspace,
cf_log_analytics_workspace_shared_keys, cf_resource, cf_network)
logger = get_logger(__name__)
WINDOWS_NAME = 'Windows'
SERVER_DELIMITER = '.'
ACR_SERVER_DELIMITER = '.azurecr.io'
AZURE_FILE_VOLUME_NAME = 'azurefile'
SECRETS_VOLUME_NAME = 'secrets'
GITREPO_VOLUME_NAME = 'gitrepo'
MSI_LOCAL_ID = '[system]'
def list_containers(client, resource_group_name=None):
"""List all container groups in a resource group. """
if resource_group_name is None:
return client.list()
return client.list_by_resource_group(resource_group_name)
def get_container(client, resource_group_name, name):
"""Show details of a container group. """
return client.get(resource_group_name, name)
def delete_container(client, resource_group_name, name, **kwargs):
"""Delete a container group. """
return client.delete(resource_group_name, name)
# pylint: disable=too-many-statements
def create_container(cmd,
resource_group_name,
name=None,
image=None,
location=None,
cpu=1,
memory=1.5,
restart_policy='Always',
ports=None,
protocol=None,
os_type='Linux',
ip_address=None,
dns_name_label=None,
command_line=None,
environment_variables=None,
secure_environment_variables=None,
registry_login_server=None,
registry_username=None,
registry_password=None,
azure_file_volume_share_name=None,
azure_file_volume_account_name=None,
azure_file_volume_account_key=None,
azure_file_volume_mount_path=None,
log_analytics_workspace=None,
log_analytics_workspace_key=None,
vnet=None,
vnet_name=None,
vnet_address_prefix='10.0.0.0/16',
subnet=None,
subnet_address_prefix='10.0.0.0/24',
network_profile=None,
gitrepo_url=None,
gitrepo_dir='.',
gitrepo_revision=None,
gitrepo_mount_path=None,
secrets=None,
secrets_mount_path=None,
file=None,
assign_identity=None,
identity_scope=None,
identity_role='Contributor',
no_wait=False):
"""Create a container group. """
if file:
return _create_update_from_file(cmd.cli_ctx, resource_group_name, name, location, file, no_wait)
if not name:
raise CLIError("error: the --name/-n argument is required unless specified with a passed in file.")
if not image:
raise CLIError("error: the --image argument is required unless specified with a passed in file.")
ports = ports or [80]
protocol = protocol or ContainerGroupNetworkProtocol.tcp
container_resource_requirements = _create_resource_requirements(cpu=cpu, memory=memory)
image_registry_credentials = _create_image_registry_credentials(registry_login_server=registry_login_server,
registry_username=registry_username,
registry_password=registry_password,
image=image)
command = shlex.split(command_line) if command_line else None
volumes = []
mounts = []
azure_file_volume = _create_azure_file_volume(azure_file_volume_share_name=azure_file_volume_share_name,
azure_file_volume_account_name=azure_file_volume_account_name,
azure_file_volume_account_key=azure_file_volume_account_key)
azure_file_volume_mount = _create_azure_file_volume_mount(azure_file_volume=azure_file_volume,
azure_file_volume_mount_path=azure_file_volume_mount_path)
if azure_file_volume:
volumes.append(azure_file_volume)
mounts.append(azure_file_volume_mount)
secrets_volume = _create_secrets_volume(secrets)
secrets_volume_mount = _create_secrets_volume_mount(secrets_volume=secrets_volume,
secrets_mount_path=secrets_mount_path)
if secrets_volume:
volumes.append(secrets_volume)
mounts.append(secrets_volume_mount)
diagnostics = None
tags = {}
if log_analytics_workspace and log_analytics_workspace_key:
log_analytics = LogAnalytics(
workspace_id=log_analytics_workspace, workspace_key=log_analytics_workspace_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics
)
elif log_analytics_workspace and not log_analytics_workspace_key:
diagnostics, tags = _get_diagnostics_from_workspace(
cmd.cli_ctx, log_analytics_workspace)
if not diagnostics:
raise CLIError('Log Analytics workspace "' + log_analytics_workspace + '" not found.')
elif not log_analytics_workspace and log_analytics_workspace_key:
raise CLIError('"--log-analytics-workspace-key" requires "--log-analytics-workspace".')
gitrepo_volume = _create_gitrepo_volume(gitrepo_url=gitrepo_url, gitrepo_dir=gitrepo_dir, gitrepo_revision=gitrepo_revision)
gitrepo_volume_mount = _create_gitrepo_volume_mount(gitrepo_volume=gitrepo_volume, gitrepo_mount_path=gitrepo_mount_path)
if gitrepo_volume:
volumes.append(gitrepo_volume)
mounts.append(gitrepo_volume_mount)
# Concatenate secure and standard environment variables
if environment_variables and secure_environment_variables:
environment_variables = environment_variables + secure_environment_variables
else:
environment_variables = environment_variables or secure_environment_variables
identity = None
if assign_identity is not None:
identity = _build_identities_info(assign_identity)
# Set up VNET, subnet and network profile if needed
if subnet and not network_profile:
network_profile = _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix)
cg_network_profile = None
if network_profile:
cg_network_profile = ContainerGroupNetworkProfile(id=network_profile)
cgroup_ip_address = _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile)
container = Container(name=name,
image=image,
resources=container_resource_requirements,
command=command,
ports=[ContainerPort(
port=p, protocol=protocol) for p in ports] if cgroup_ip_address else None,
environment_variables=environment_variables,
volume_mounts=mounts or None)
cgroup = ContainerGroup(location=location,
identity=identity,
containers=[container],
os_type=os_type,
restart_policy=restart_policy,
ip_address=cgroup_ip_address,
image_registry_credentials=image_registry_credentials,
volumes=volumes or None,
network_profile=cg_network_profile,
diagnostics=diagnostics,
tags=tags)
container_group_client = cf_container_groups(cmd.cli_ctx)
lro = sdk_no_wait(no_wait, container_group_client.create_or_update, resource_group_name,
name, cgroup)
if assign_identity is not None and identity_scope:
from azure.cli.core.commands.arm import assign_identity
cg = container_group_client.get(resource_group_name, name)
assign_identity(cmd.cli_ctx, lambda: cg, lambda cg: cg, identity_role, identity_scope)
return lro
def _build_identities_info(identities):
identities = identities or []
identity_type = ResourceIdentityType.none
if not identities or MSI_LOCAL_ID in identities:
identity_type = ResourceIdentityType.system_assigned
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities and identity_type == ResourceIdentityType.system_assigned:
identity_type = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_type = ResourceIdentityType.user_assigned
identity = ContainerGroupIdentity(type=identity_type)
if external_identities:
identity.user_assigned_identities = {e: {} for e in external_identities}
return identity
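# For illustration (hypothetical inputs): _build_identities_info(['[system]']) yields a
# system-assigned identity, a list of user-assigned identity resource ids yields
# user_assigned, and mixing '[system]' with resource ids yields system_assigned_user_assigned.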
def _get_resource(client, resource_group_name, *subresources):
from azure.core.exceptions import HttpResponseError
try:
resource = client.get(resource_group_name, *subresources)
return resource
except HttpResponseError as ex:
if ex.error.code == "NotFound" or ex.error.code == "ResourceNotFound":
return None
raise
def _get_vnet_network_profile(cmd, location, resource_group_name, vnet, vnet_address_prefix, subnet, subnet_address_prefix):
from azure.cli.core.profiles import ResourceType
from msrestazure.tools import parse_resource_id, is_valid_resource_id
aci_delegation_service_name = "Microsoft.ContainerInstance/containerGroups"
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
aci_delegation = Delegation(
name=aci_delegation_service_name,
service_name=aci_delegation_service_name
)
ncf = cf_network(cmd.cli_ctx)
vnet_name = vnet
subnet_name = subnet
if is_valid_resource_id(subnet):
parsed_subnet_id = parse_resource_id(subnet)
subnet_name = parsed_subnet_id['resource_name']
vnet_name = parsed_subnet_id['name']
resource_group_name = parsed_subnet_id['resource_group']
elif is_valid_resource_id(vnet):
parsed_vnet_id = parse_resource_id(vnet)
vnet_name = parsed_vnet_id['resource_name']
resource_group_name = parsed_vnet_id['resource_group']
default_network_profile_name = "aci-network-profile-{}-{}".format(vnet_name, subnet_name)
subnet = _get_resource(ncf.subnets, resource_group_name, vnet_name, subnet_name)
# For an existing subnet, validate and add delegation if needed
if subnet:
logger.info('Using existing subnet "%s" in resource group "%s"', subnet.name, resource_group_name)
for sal in (subnet.service_association_links or []):
if sal.linked_resource_type != aci_delegation_service_name:
raise CLIError("Can not use subnet with existing service association links other than {}.".format(aci_delegation_service_name))
if not subnet.delegations:
logger.info('Adding ACI delegation to the existing subnet.')
subnet.delegations = [aci_delegation]
subnet = ncf.subnets.begin_create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
else:
for delegation in subnet.delegations:
if delegation.service_name != aci_delegation_service_name:
raise CLIError("Can not use subnet with existing delegations other than {}".format(aci_delegation_service_name))
network_profile = _get_resource(ncf.network_profiles, resource_group_name, default_network_profile_name)
if network_profile:
logger.info('Using existing network profile "%s"', default_network_profile_name)
return network_profile.id
    # Create a new subnet and VNet if they do not exist
else:
Subnet, VirtualNetwork, AddressSpace = cmd.get_models('Subnet', 'VirtualNetwork',
'AddressSpace', resource_type=ResourceType.MGMT_NETWORK)
vnet = _get_resource(ncf.virtual_networks, resource_group_name, vnet_name)
if not vnet:
logger.info('Creating new vnet "%s" in resource group "%s"', vnet_name, resource_group_name)
ncf.virtual_networks.begin_create_or_update(resource_group_name,
vnet_name,
VirtualNetwork(name=vnet_name,
location=location,
address_space=AddressSpace(address_prefixes=[vnet_address_prefix])))
subnet = Subnet(
name=subnet_name,
location=location,
address_prefix=subnet_address_prefix,
delegations=[aci_delegation])
logger.info('Creating new subnet "%s" in resource group "%s"', subnet_name, resource_group_name)
subnet = ncf.subnets.begin_create_or_update(resource_group_name, vnet_name, subnet_name, subnet).result()
NetworkProfile, ContainerNetworkInterfaceConfiguration, IPConfigurationProfile = cmd.get_models('NetworkProfile',
'ContainerNetworkInterfaceConfiguration',
'IPConfigurationProfile',
resource_type=ResourceType.MGMT_NETWORK)
# In all cases, create the network profile with aci NIC
network_profile = NetworkProfile(
name=default_network_profile_name,
location=location,
container_network_interface_configurations=[ContainerNetworkInterfaceConfiguration(
name="eth0",
ip_configurations=[IPConfigurationProfile(
name="ipconfigprofile",
subnet=subnet
)]
)]
)
logger.info('Creating network profile "%s" in resource group "%s"', default_network_profile_name, resource_group_name)
network_profile = ncf.network_profiles.create_or_update(resource_group_name, default_network_profile_name, network_profile).result()
return network_profile.id
def _get_diagnostics_from_workspace(cli_ctx, log_analytics_workspace):
from msrestazure.tools import parse_resource_id
log_analytics_workspace_client = cf_log_analytics_workspace(cli_ctx)
log_analytics_workspace_shared_keys_client = cf_log_analytics_workspace_shared_keys(cli_ctx)
for workspace in log_analytics_workspace_client.list():
if log_analytics_workspace in (workspace.name, workspace.customer_id):
keys = log_analytics_workspace_shared_keys_client.get_shared_keys(
parse_resource_id(workspace.id)['resource_group'], workspace.name)
log_analytics = LogAnalytics(
workspace_id=workspace.customer_id, workspace_key=keys.primary_shared_key)
diagnostics = ContainerGroupDiagnostics(
log_analytics=log_analytics)
return (diagnostics, {'oms-resource-link': workspace.id})
return None, {}
def _create_update_from_file(cli_ctx, resource_group_name, name, location, file, no_wait):
    resource_client = cf_resource(cli_ctx)
    container_group_client = cf_container_groups(cli_ctx)
    cg_definition = None
    try:
        with open(file, 'r') as f:
            cg_definition = yaml.safe_load(f)
    except OSError:  # FileNotFoundError introduced in Python 3
        raise CLIError("No such file or directory: " + file)
    except yaml.YAMLError as e:
        raise CLIError("Error while parsing yaml file:\n\n" + str(e))
    # Validate names match if both are provided
    if name and cg_definition.get('name', None):
        if name != cg_definition.get('name', None):
            raise CLIError("The name parameter and name from yaml definition must match.")
    else:
        # Validate at least one name is provided
        name = name or cg_definition.get('name', None)
        if cg_definition.get('name', None) is None and not name:
            raise CLIError("The name of the container group is required")
    cg_definition['name'] = name
    location = location or cg_definition.get('location', None)
    if not location:
        location = resource_client.resource_groups.get(resource_group_name).location
    cg_definition['location'] = location
    api_version = cg_definition.get('apiVersion', None) or container_group_client.api_version
    return sdk_no_wait(no_wait,
                       resource_client.resources.create_or_update,
                       resource_group_name,
                       "Microsoft.ContainerInstance",
                       '',
                       "containerGroups",
                       name,
                       api_version,
                       cg_definition)
# pylint: disable=inconsistent-return-statements
def _create_resource_requirements(cpu, memory):
"""Create resource requirements. """
if cpu or memory:
container_resource_requests = ResourceRequests(memory_in_gb=memory, cpu=cpu)
return ResourceRequirements(requests=container_resource_requests)
def _create_image_registry_credentials(registry_login_server, registry_username, registry_password, image):
"""Create image registry credentials. """
image_registry_credentials = None
if registry_login_server:
if not registry_username:
raise CLIError('Please specify --registry-username in order to use custom image registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use custom image registry.')
image_registry_credentials = [ImageRegistryCredential(server=registry_login_server,
username=registry_username,
password=registry_password)]
elif ACR_SERVER_DELIMITER in image.split("/")[0]:
if not registry_username:
try:
registry_username = prompt(msg='Image registry username: ')
except NoTTYException:
raise CLIError('Please specify --registry-username in order to use Azure Container Registry.')
if not registry_password:
try:
registry_password = prompt_pass(msg='Image registry password: ')
except NoTTYException:
raise CLIError('Please specify --registry-password in order to use Azure Container Registry.')
acr_server = image.split("/")[0] if image.split("/") else None
if acr_server:
image_registry_credentials = [ImageRegistryCredential(server=acr_server,
username=registry_username,
password=registry_password)]
elif registry_username and registry_password and SERVER_DELIMITER in image.split("/")[0]:
login_server = image.split("/")[0] if image.split("/") else None
if login_server:
image_registry_credentials = [ImageRegistryCredential(server=login_server,
username=registry_username,
password=registry_password)]
else:
raise CLIError('Failed to parse login server from image name; please explicitly specify --registry-server.')
return image_registry_credentials
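# Illustration only (not part of the original module) of how the three branches in
# _create_image_registry_credentials above resolve. The concrete delimiter values are
# assumptions: ACR_SERVER_DELIMITER is taken to match the ".azurecr.io" suffix and
# SERVER_DELIMITER to be ".", as defined elsewhere in this module.
#
#   --registry-login-server given                        -> explicit-server branch;
#                                                            username/password prompted for if missing.
#   image "myregistry.azurecr.io/nginx:latest"           -> ACR branch; server inferred from the
#                                                            image reference, credentials prompted for.
#   image "registry.example.com/app:v1" plus both
#   --registry-username and --registry-password          -> generic private-registry branch.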
def _create_azure_file_volume(azure_file_volume_share_name, azure_file_volume_account_name, azure_file_volume_account_key):
"""Create Azure File volume. """
azure_file_volume = None
if azure_file_volume_share_name:
if not azure_file_volume_account_name:
raise CLIError('Please specify --azure-file-volume-account-name in order to use Azure File volume.')
if not azure_file_volume_account_key:
try:
azure_file_volume_account_key = prompt_pass(msg='Azure File storage account key: ')
except NoTTYException:
raise CLIError('Please specify --azure-file-volume-account-key in order to use Azure File volume.')
azure_file_volume = AzureFileVolume(share_name=azure_file_volume_share_name,
storage_account_name=azure_file_volume_account_name,
storage_account_key=azure_file_volume_account_key)
return Volume(name=AZURE_FILE_VOLUME_NAME, azure_file=azure_file_volume) if azure_file_volume else None
def _create_secrets_volume(secrets):
"""Create secrets volume. """
return Volume(name=SECRETS_VOLUME_NAME, secret=secrets) if secrets else None
def _create_gitrepo_volume(gitrepo_url, gitrepo_dir, gitrepo_revision):
"""Create Git Repo volume. """
gitrepo_volume = GitRepoVolume(repository=gitrepo_url, directory=gitrepo_dir, revision=gitrepo_revision)
return Volume(name=GITREPO_VOLUME_NAME, git_repo=gitrepo_volume) if gitrepo_url else None
# pylint: disable=inconsistent-return-statements
def _create_azure_file_volume_mount(azure_file_volume, azure_file_volume_mount_path):
"""Create Azure File volume mount. """
if azure_file_volume_mount_path:
if not azure_file_volume:
raise CLIError('Please specify --azure-file-volume-share-name --azure-file-volume-account-name --azure-file-volume-account-key '
'to enable Azure File volume mount.')
return VolumeMount(name=AZURE_FILE_VOLUME_NAME, mount_path=azure_file_volume_mount_path)
def _create_secrets_volume_mount(secrets_volume, secrets_mount_path):
"""Create secrets volume mount. """
if secrets_volume:
if not secrets_mount_path:
raise CLIError('Please specify --secrets --secrets-mount-path '
'to enable secrets volume mount.')
return VolumeMount(name=SECRETS_VOLUME_NAME, mount_path=secrets_mount_path)
def _create_gitrepo_volume_mount(gitrepo_volume, gitrepo_mount_path):
"""Create Git Repo volume mount. """
if gitrepo_mount_path:
if not gitrepo_volume:
raise CLIError('Please specify --gitrepo-url (--gitrepo-dir --gitrepo-revision) '
'to enable Git Repo volume mount.')
return VolumeMount(name=GITREPO_VOLUME_NAME, mount_path=gitrepo_mount_path)
# pylint: disable=inconsistent-return-statements
def _create_ip_address(ip_address, ports, protocol, dns_name_label, network_profile):
"""Create IP address. """
if (ip_address and ip_address.lower() == 'public') or dns_name_label:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
dns_name_label=dns_name_label, type=ContainerGroupIpAddressType.public)
if network_profile:
return IpAddress(ports=[Port(protocol=protocol, port=p) for p in ports],
type=ContainerGroupIpAddressType.private)
# pylint: disable=inconsistent-return-statements
def container_logs(cmd, resource_group_name, name, container_name=None, follow=False):
"""Tail a container instance log. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
if not follow:
log = container_client.list_logs(resource_group_name, name, container_name)
print(log.content)
else:
        _start_streaming(
            terminate_condition=_is_container_terminated,
            terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
            shutdown_grace_period=5,
            stream_target=_stream_logs,
            stream_args=(container_client, resource_group_name, name, container_name, container_group.restart_policy))
def container_export(cmd, resource_group_name, name, file):
resource_client = cf_resource(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
resource = resource_client.resources.get(resource_group_name,
"Microsoft.ContainerInstance",
'',
"containerGroups",
name,
container_group_client.api_version,
False).__dict__
    # Remove unwanted properties
resource['properties'].pop('instanceView', None)
resource.pop('sku', None)
resource.pop('id', None)
resource.pop('plan', None)
resource.pop('kind', None)
resource.pop('managed_by', None)
resource['properties'].pop('provisioningState', None)
# Correctly export the identity
try:
identity = resource['identity'].type
if identity != ResourceIdentityType.none:
resource['identity'] = resource['identity'].__dict__
identity_entry = {'type': resource['identity']['type'].value}
if resource['identity']['user_assigned_identities']:
identity_entry['user_assigned_identities'] = {k: {} for k in resource['identity']['user_assigned_identities']}
resource['identity'] = identity_entry
except (KeyError, AttributeError):
        resource.pop('identity', None)
# Remove container instance views
for i in range(len(resource['properties']['containers'])):
resource['properties']['containers'][i]['properties'].pop('instanceView', None)
# Add the api version
resource['apiVersion'] = container_group_client.api_version
with open(file, 'w+') as f:
yaml.safe_dump(resource, f, default_flow_style=False)
def container_exec(cmd, resource_group_name, name, exec_command, container_name=None, terminal_row_size=20, terminal_col_size=80):
"""Start exec for a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
    if container_name or (container_name is None and len(container_group.containers) == 1):
# If only one container in container group, use that container.
if container_name is None:
container_name = container_group.containers[0].name
terminal_size = ContainerExecRequestTerminalSize(rows=terminal_row_size, cols=terminal_col_size)
execContainerResponse = container_client.execute_command(resource_group_name, name, container_name, exec_command, terminal_size)
        if platform.system() == WINDOWS_NAME:
_start_exec_pipe_win(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
_start_exec_pipe(execContainerResponse.web_socket_uri, execContainerResponse.password)
else:
raise CLIError('--container-name required when container group has more than one container.')
def _start_exec_pipe_win(web_socket_uri, password):
def _on_ws_open(ws):
ws.send(password)
t = threading.Thread(target=_capture_stdin, args=[ws])
t.daemon = True
t.start()
ws = websocket.WebSocketApp(web_socket_uri, on_open=_on_ws_open, on_message=_on_ws_msg)
ws.run_forever()
def _on_ws_msg(ws, msg):
sys.stdout.write(msg)
sys.stdout.flush()
def _capture_stdin(ws):
    while True:
        if msvcrt.kbhit():
            x = msvcrt.getch()
            ws.send(x)
def _start_exec_pipe(web_socket_uri, password):
ws = websocket.create_connection(web_socket_uri)
oldtty = termios.tcgetattr(sys.stdin)
old_handler = signal.getsignal(signal.SIGWINCH)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
ws.send(password)
while True:
try:
if not _cycle_exec_pipe(ws):
break
except (select.error, IOError) as e:
if e.args and e.args[0] == errno.EINTR:
pass
else:
raise
except websocket.WebSocketException:
pass
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
signal.signal(signal.SIGWINCH, old_handler)
def _cycle_exec_pipe(ws):
r, _, _ = select.select([ws.sock, sys.stdin], [], [])
if ws.sock in r:
data = ws.recv()
if not data:
return False
sys.stdout.write(data)
sys.stdout.flush()
if sys.stdin in r:
x = sys.stdin.read(1)
if not x:
return True
ws.send(x)
return True
def attach_to_container(cmd, resource_group_name, name, container_name=None):
"""Attach to a container. """
container_client = cf_container(cmd.cli_ctx)
container_group_client = cf_container_groups(cmd.cli_ctx)
container_group = container_group_client.get(resource_group_name, name)
# If container name is not present, use the first container.
if container_name is None:
container_name = container_group.containers[0].name
    _start_streaming(
        terminate_condition=_is_container_terminated,
        terminate_condition_args=(container_group_client, resource_group_name, name, container_name),
        shutdown_grace_period=5,
        stream_target=_stream_container_events_and_logs,
        stream_args=(container_group_client, container_client, resource_group_name, name, container_name))
def _start_streaming(terminate_condition, terminate_condition_args, shutdown_grace_period, stream_target, stream_args):
    """Start streaming for the stream target. """
    import colorama
    colorama.init()
    try:
        t = threading.Thread(target=stream_target, args=stream_args)
        t.daemon = True
        t.start()
        while not terminate_condition(*terminate_condition_args) and t.is_alive():
            time.sleep(10)
        time.sleep(shutdown_grace_period)
    finally:
        colorama.deinit()
def _stream_logs(client, resource_group_name, name, container_name, restart_policy):
"""Stream logs for a container. """
lastOutputLines = 0
while True:
log = client.list_logs(resource_group_name, name, container_name)
lines = log.content.split('\n')
currentOutputLines = len(lines)
# Should only happen when the container restarts.
if currentOutputLines < lastOutputLines and restart_policy != 'Never':
print("Warning: you're having '--restart-policy={}'; the container '{}' was just restarted; the tail of the current log might be missing. Exiting...".format(restart_policy, container_name))
break
_move_console_cursor_up(lastOutputLines)
print(log.content)
lastOutputLines = currentOutputLines
time.sleep(2)
def _stream_container_events_and_logs(container_group_client, container_client, resource_group_name, name, container_name):
"""Stream container events and logs. """
lastOutputLines = 0
lastContainerState = None
while True:
container_group, container = _find_container(container_group_client, resource_group_name, name, container_name)
container_state = 'Unknown'
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state:
container_state = container.instance_view.current_state.state
_move_console_cursor_up(lastOutputLines)
if container_state != lastContainerState:
print("Container '{}' is in state '{}'...".format(container_name, container_state))
currentOutputLines = 0
if container.instance_view and container.instance_view.events:
for event in sorted(container.instance_view.events, key=lambda e: e.last_timestamp):
print('(count: {}) (last timestamp: {}) {}'.format(event.count, event.last_timestamp, event.message))
currentOutputLines += 1
lastOutputLines = currentOutputLines
lastContainerState = container_state
if container_state == 'Running':
print('\nStart streaming logs:')
break
time.sleep(2)
_stream_logs(container_client, resource_group_name, name, container_name, container_group.restart_policy)
def _is_container_terminated(client, resource_group_name, name, container_name):
"""Check if a container should be considered terminated. """
container_group, container = _find_container(client, resource_group_name, name, container_name)
# If a container group is terminated, assume the container is also terminated.
if container_group.instance_view and container_group.instance_view.state:
if container_group.instance_view.state == 'Succeeded' or container_group.instance_view.state == 'Failed':
return True
# If the restart policy is Always, assume the container will be restarted.
if container_group.restart_policy:
if container_group.restart_policy == 'Always':
return False
# Only assume the container is terminated if its state is Terminated.
if container.instance_view and container.instance_view.current_state and container.instance_view.current_state.state == 'Terminated':
return True
return False
def _find_container(client, resource_group_name, name, container_name):
"""Find a container in a container group. """
container_group = client.get(resource_group_name, name)
containers = [c for c in container_group.containers if c.name == container_name]
if len(containers) != 1:
raise CLIError("Found 0 or more than 1 container with name '{}'".format(container_name))
return container_group, containers[0]
def _move_console_cursor_up(lines):
"""Move console cursor up. """
if lines > 0:
# Use stdout.write to support Python 2
sys.stdout.write('\033[{}A\033[K\033[J'.format(lines))
def _gen_guid():
import uuid
return uuid.uuid4()
|
launch_data_pkt_sim.py
|
import os
import threading

def load_config():
    """Read host.config: three lines per host (host name, Mininet PID, separator line)."""
    host_info = []
    with open("host.config", "r") as fin:
        while True:
            try:
                host_name = fin.readline().split()[0]
                host_mnet_pid = int(fin.readline())
            except (IndexError, ValueError):
                # End of file (or a malformed trailing record)
                break
            host_info.append((host_name, host_mnet_pid))
            print(host_name, host_mnet_pid)
            fin.readline()  # skip the separator line
    return host_info

class Main_Local_Thread(threading.Thread):
    def __init__(self, host_name, host_mnet_pid):
        threading.Thread.__init__(self)
        self.host_name = host_name
        self.host_mnet_pid = host_mnet_pid
    def run(self):
        # Launch one data packet simulator process per host
        os.system("python data_pkt_simulator.py -n {} -p {}".format(self.host_name, self.host_mnet_pid))

if __name__ == "__main__":
    host_info = load_config()
    for host_name, host_mnet_pid in host_info:
        td = Main_Local_Thread(host_name, host_mnet_pid)
        td.start()
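# Expected host.config layout (a sketch inferred from load_config above; the actual file is
# produced elsewhere): three lines per host -- the host name (first whitespace-separated
# token), the Mininet process id, and a separator line that is skipped. For example:
#
#   h1 extra-fields-ignored
#   12345
#   -
#   h2
#   12346
#   -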
|
mp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import alfred
import os
import plistlib
import json
import codecs
import urllib2
import logging
import inspect
from subprocess import call
import tempfile
import shutil
import logging.handlers
import threading
import Queue
class Workflow():
def __init__(self, dirname):
self.dirname = dirname
self.alleyoop = False
plist_file = 'info.plist' if os.path.exists(os.path.join(dirname, 'info.plist')) else 'Info.plist'
plist_file = os.path.join(dirname, plist_file)
if os.path.exists(plist_file):
plist = plistlib.readPlist(plist_file)
self.name = plist['name']
self.disabled = plist.get('disabled', False)
self.description = plist['description']
self.icon = os.path.join(dirname, 'icon.png')
if not self.disabled:
# hunt for update.json
for (b, d, f) in os.walk(self.dirname):
if 'update.json' in f:
try:
with codecs.open(os.path.join(b, 'update.json'), "r", "utf-8") as f:
info = json.load(f)
self.alleyoop = True
self.version = info['version']
self.remote = info['remote_json']
autolog('%s is Alleyoop compatible' % self.name)
break
except Exception, e:
autolog('%s invalid update.json - %s' % (self.name, str(e)))
else:
autolog('no info.plist found at ' + dirname)
def check_for_update(self):
if self.disabled:
return
self.has_update = False
self.has_check_errors = False
self.error = ''
if self.alleyoop:
try:
autolog('%s checking for update' % self.name)
autolog('%s remote url - %s' % (self.name, self.remote))
update_data = json.loads(urllib2.urlopen(self.remote).read())
if self.version < update_data['version']:
self.has_update = True
self.update = {}
self.update['version'] = update_data['version']
self.update['description'] = update_data['description']
if 'download_url' in update_data:
self.update['download'] = update_data['download_url']
autolog('%s download_url - %s' % (self.name, self.update['download']))
else:
self.update['download'] = update_data['download_uri']
autolog('%s download_uri - %s' % (self.name, self.update['download']))
else:
autolog('%s already on latest version - %f' % (self.name, self.version))
except Exception, e:
self.has_check_errors = True
self.error = 'Could not check for updates'
autolog('%s error when checking for update - %s' % (self.name, str(e)))
return self.has_update
def to_dict(self):
ignored = ['check_for_update', 'to_dict', 'log']
attributes = [c for c in dir(self) if not c.startswith('_') and c not in ignored]
d = {}
for a in attributes:
d[a] = getattr(self, a)
return d
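# Sketch of the metadata Workflow expects (inferred from __init__ and check_for_update above;
# the field values here are made up). A workflow ships an update.json such as:
#
#   {"version": 1.2, "remote_json": "https://example.com/myworkflow.json"}
#
# and the remote JSON it points at should look like:
#
#   {"version": 1.3, "description": "what changed",
#    "download_url": "https://example.com/myworkflow.alfredworkflow"}
#
# (older workflows may use "download_uri" instead of "download_url"; both are handled).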
def show_options():
"""Displays initial options"""
feedback = alfred.Feedback()
feedback.addItem(title='List compatible workflows', autocomplete='list', valid='no')
feedback.addItem(title='Check for updates', subtitle='This may take a while...' if get_updateable_timeout() <= 10.0 else '', autocomplete='update', valid='no')
feedback.addItem(title='Reset cache', autocomplete='reset', valid='no')
feedback.addItem(title='View log', autocomplete='log', valid='no')
feedback.output()
def get_compatible():
"""Gets a list if compatible workflows"""
basedir = os.path.dirname(os.path.abspath('.'))
workflow_dirs = [f for f in os.listdir(basedir) if os.path.isdir(os.path.join(basedir, f))]
workflows = []
for d in workflow_dirs:
workflows.append(Workflow(os.path.join(basedir, d)))
workflows = [w for w in workflows if w.alleyoop]
autolog('found %s compatible workflows' % len(workflows))
return sorted(workflows, key=lambda x: x.name)
def list_compatible():
"""Displays all Alleyoop compatible workflows"""
workflows = get_compatible()
feedback = alfred.Feedback()
for w in workflows:
subtitle = 'v' + unicode(w.version) + ' ' + w.description
feedback.addItem(title=w.name, subtitle=subtitle, icon=w.icon, valid='no')
if feedback.isEmpty():
feedback.addItem(title='No compatible workflows found', valid='no', autocomplete='')
else:
feedback.addItem(title='Go back', valid='no', icon='back.png', autocomplete='')
feedback.output()
def check_update(work_q, done_q, total):
while True:
w = work_q.get()
w.check_for_update()
done_q.put(w)
work_q.task_done()
def cache_updateable():
work_q = Queue.Queue()
done_q = Queue.Queue()
workflows = get_compatible()
    # create a fixed number of threads
for i in range(10):
t = threading.Thread(target=check_update, args=(work_q, done_q, len(workflows)))
t.daemon = True
t.start()
alfred.notify("Monkey Patch", "Checking updates for %i workflows" % (len(workflows)), text='Please wait...', sound=False)
for i, w in enumerate(workflows):
# w.check_for_update()
work_q.put(w)
work_q.join()
alfred.notify("Monkey Patch", "Checking updates done", sound=False)
workflows = [w.to_dict() for w in workflows if w.has_update or w.has_check_errors]
alfred.cache.set('workflow.update', workflows, expire=86400)
def get_updateable(force=True):
cache_timeout = get_updateable_timeout()
autolog('cache_timeout is: ' + str(cache_timeout))
if force and cache_timeout == -1:
cache_updateable()
if not force and cache_timeout == -1:
return None
return alfred.cache.get('workflow.update')
def get_updateable_timeout():
return alfred.cache.timeout('workflow.update')
def list_updates():
"""Displays all available updates"""
workflows = get_updateable()
feedback = alfred.Feedback()
# if we have at least one item with updates
valid_updates = [w for w in workflows if w['has_update'] and not w['has_check_errors']]
if len(valid_updates) > 0:
feedback.addItem(title='Download', valid='yes', arg='download-all')
for w in workflows:
if w['has_update'] and not w['has_check_errors']:
subtitle = 'v' + str(w['version']) + u' ➔ ' + str(w['update']['version']) + ' ' + w['update']['description']
feedback.addItem(title=w['name'], subtitle=subtitle, icon=w['icon'], arg='download "%s"' % w['dirname'])
elif w['has_check_errors']:
feedback.addItem(title=w['name'], subtitle=w['error'], icon='bad.png', valid='no', arg=w['dirname'])
if feedback.isEmpty():
feedback.addItem(title='All your workflows are up to date', valid='no', icon='uptodate.png', autocomplete='')
else:
feedback.addItem(title='Go back', valid='no', icon='back.png', autocomplete='')
feedback.output()
def reset():
"""Resets the cache"""
show_state('Resetting...')
alfred.cache.delete('workflow.update')
alfred.show('mp ')
def logfile():
return os.path.join(alfred.work(False), 'monkeypatch_log.txt')
def show_state(state):
    feedback = alfred.Feedback()
    feedback.addItem(title=state)
    feedback.output()
def openlog():
show_state('Opening log folder...')
call(['open', os.path.dirname(logfile())])
def autolog(message):
"""Automatically log the current function details."""
# Get the previous frame in the stack, otherwise it would
# be this function!!!
func = inspect.currentframe().f_back.f_code
# Dump the message + the name of this function to the log.
logging.debug("%s: %s() in %s:%i" % (
message,
func.co_name,
func.co_filename,
func.co_firstlineno
))
def download_all():
"""Downloads all available updates"""
workflows = get_updateable()
workflows = [w for w in workflows if w['has_update'] and not w['has_check_errors']]
for i, w in enumerate(workflows):
download(w['dirname'], w_cached=w, current=i+1, total=len(workflows))
print "Updates downloaded"
def download(w_dir, w_cached=None, direct=False, current=None, total=None):
try:
if w_cached is None:
workflows = get_updateable()
w_cached = [w for w in workflows if w['has_update'] and not w['has_check_errors'] and w['dirname'] == w_dir][0]
w = w_cached
download_file = os.path.join(os.path.expanduser("~/Downloads"), "{0} v{1}.alfredworkflow".format(w['name'], w['update']['version']))
if os.path.exists(download_file):
os.remove(download_file)
tmp = tempfile.mkdtemp()
f = tempfile.NamedTemporaryFile(suffix=".alfredworkflow", dir=tmp, delete=False)
f.write(urllib2.urlopen(w['update']['download']).read())
f.close()
shutil.copy(f.name, download_file)
info = "Downloaded"
if current is not None and total is not None:
info = "Downloaded %i of %i" % (current, total)
alfred.notify("Monkey Patch", info, text=os.path.basename(download_file), sound=False)
autolog(info + ' ' + os.path.basename(download_file))
# we can remove this entry from our cache
if direct:
updateables = get_updateable(force=False)
if updateables is not None:
for i, u in enumerate(updateables):
if u['dirname'] == w['dirname']:
del updateables[i]
break
alfred.cache.set('workflow.update', updateables, expire=86400)
call(['open', download_file])
    except Exception, e:
        w_name = w_dir
        if w_cached is not None:
            w_name = w_cached['name']
autolog('error while trying to download %s - %s' % (w_name, str(e)))
alfred.notify("Monkey Patch", "Download error", text=w_name, sound=False)
def main():
logging.basicConfig(filename=logfile(), level=logging.DEBUG)
try:
args = alfred.args()
        command, options = (args[0], args[1]) if len(args) > 1 else (args[0], None)
command_switch = {
'list': lambda x: list_compatible(),
'update': lambda x: list_updates(),
'reset': lambda x: reset(),
'log': lambda x: openlog(),
'download-all': lambda x: download_all(),
'download': lambda w: download(w, direct=True)
}
if command in command_switch:
command_switch[command](options)
else:
show_options()
except Exception, e:
print str(e)
autolog(str(e))
if __name__ == '__main__':
main()
|
installwizard.py
|
import sys
import os
import traceback
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
import electrum_ltc as electrum
from electrum_ltc import Wallet, WalletStorage
from electrum_ltc.util import UserCancelled, InvalidPassword
from electrum_ltc.base_wizard import BaseWizard
from electrum_ltc.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Litecoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
run_next(*out)
return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum-LTC - ' + _('Install Wizard'))
self.app = app
self.config = config
        # Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-ltc.png')
self.show()
self.raise_()
        self.refresh_gui()  # Needed for Qt on macOS. Lame.
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
self.pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self.set_layout(vbox, title=_('Electrum-LTC wallet'))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
except IOError:
self.storage = None
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
dash_buffer.py
|
from __future__ import division
import queue
import threading
import time
import csv
import os
import config_dash
from stop_watch import StopWatch
# Durations in seconds
PLAYER_STATES = ['INITIALIZED', 'INITIAL_BUFFERING', 'PLAY',
'PAUSE', 'BUFFERING', 'STOP', 'END']
EXIT_STATES = ['STOP', 'END']
class DashPlayer:
""" DASH buffer class """
def __init__(self, video_length, segment_duration):
config_dash.LOG.info("Initializing the Buffer")
self.player_thread = None
self.playback_start_time = None
self.playback_duration = video_length
self.segment_duration = segment_duration
#print "video_length = {}".format(video_length)
#print "segment_duration = {}".format(segment_duration)
# Timers to keep track of playback time and the actual time
self.playback_timer = StopWatch()
self.actual_start_time = None
# Playback State
self.playback_state = "INITIALIZED"
self.playback_state_lock = threading.Lock()
# Buffer size
if config_dash.MAX_BUFFER_SIZE:
self.max_buffer_size = config_dash.MAX_BUFFER_SIZE
else:
self.max_buffer_size = video_length
# Duration of the current buffer
self.buffer_length = 0
self.buffer_length_lock = threading.Lock()
# Buffer Constants
self.initial_buffer = config_dash.INITIAL_BUFFERING_COUNT
self.alpha = config_dash.ALPHA_BUFFER_COUNT
self.beta = config_dash.BETA_BUFFER_COUNT
self.segment_limit = None
# Current video buffer that holds the segment data
self.buffer = queue.Queue()
self.buffer_lock = threading.Lock()
self.current_segment = None
self.buffer_log_file = config_dash.BUFFER_LOG_FILENAME
self.do_request = True
config_dash.LOG.info("VideoLength={},segmentDuration={},MaxBufferSize={},InitialBuffer(secs)={},"
"BufferAlph(secs)={},BufferBeta(secs)={}".format(self.playback_duration,
self.segment_duration,
self.max_buffer_size, self.initial_buffer,
self.alpha, self.beta))
def set_state(self, state):
""" Function to set the state of the player"""
state = state.upper()
if state in PLAYER_STATES:
self.playback_state_lock.acquire()
config_dash.LOG.info("Changing state from {} to {} at {} Playback time ".format(self.playback_state, state,
self.playback_timer.time()))
self.playback_state = state
self.playback_state_lock.release()
else:
config_dash.LOG.error("Unidentified state: {}".format(state))
def initialize_player(self):
"""Method that update the current playback time"""
start_time = time.time()
initial_wait = 0
paused = False
buffering = False
interruption_start = None
config_dash.LOG.info("Initialized player with video length {}".format(self.playback_duration))
while True:
# Video stopped by the user
if self.playback_state == "END":
config_dash.LOG.info("Finished playback of the video: {} seconds of video played for {} seconds".format(
self.playback_duration, time.time() - start_time))
config_dash.JSON_HANDLE['playback_info']['end_time'] = time.time()
self.playback_timer.pause()
return "STOPPED"
if self.playback_state == "STOP":
# If video is stopped quit updating the playback time and exit player
config_dash.LOG.info("Player Stopped at time {}".format(
time.time() - start_time))
config_dash.JSON_HANDLE['playback_info']['end_time'] = time.time()
self.playback_timer.pause()
self.log_entry("Stopped")
return "STOPPED"
# If paused by user
if self.playback_state == "PAUSE":
if not paused:
# do not update the playback time. Wait for the state to change
config_dash.LOG.info("Player Paused after {:4.2f} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
paused = True
continue
# If the playback encounters buffering during the playback
if self.playback_state == "BUFFERING":
if not buffering:
config_dash.LOG.info("Entering buffering stage after {} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
buffering = True
interruption_start = time.time()
config_dash.JSON_HANDLE['playback_info']['interruptions']['count'] += 1
# If the size of the buffer is greater than the RE_BUFFERING_DURATION then start playback
else:
                    # If the RE_BUFFERING_DURATION is greater than the remaining length of the video then do not wait
remaining_playback_time = self.playback_duration - self.playback_timer.time()
if ((self.buffer.qsize() >= config_dash.RE_BUFFERING_COUNT) or (
config_dash.RE_BUFFERING_COUNT * self.segment_duration >= remaining_playback_time
and self.buffer.qsize() > 0)):
buffering = False
if interruption_start:
interruption_end = time.time()
interruption = interruption_end - interruption_start
config_dash.JSON_HANDLE['playback_info']['interruptions']['events'].append(
(interruption_start, interruption_end))
config_dash.JSON_HANDLE['playback_info']['interruptions']['total_duration'] += interruption
config_dash.LOG.info("Duration of interruption = {}".format(interruption))
interruption_start = None
self.set_state("PLAY")
self.log_entry("Buffering-Play")
if self.playback_state == "INITIAL_BUFFERING":
if self.buffer.qsize() < config_dash.INITIAL_BUFFERING_COUNT:
initial_wait = time.time() - start_time
continue
else:
config_dash.LOG.info("Initial Waiting Time = {}".format(initial_wait))
config_dash.JSON_HANDLE['playback_info']['initial_buffering_duration'] = initial_wait
# config_dash.JSON_HANDLE['playback_info']['start_time'] = time.time()
                    config_dash.JSON_HANDLE['playback_info']['start_time'] = time.asctime(time.localtime(time.time()))
self.set_state("PLAY")
self.log_entry("InitialBuffering-Play")
if self.playback_state == "PLAY":
                # Check if the buffer has any segments
if self.playback_timer.time() == self.playback_duration:
self.set_state("END")
self.log_entry("Play-End")
if self.buffer.qsize() == 0:
config_dash.LOG.info("Buffer empty after {} seconds of playback".format(
self.playback_timer.time()))
self.playback_timer.pause()
self.set_state("BUFFERING")
self.log_entry("Play-Buffering")
continue
                # Read one segment from the buffer
                # Acquire a lock on the buffer and read a segment from it
self.buffer_lock.acquire()
play_segment = self.buffer.get()
self.buffer_lock.release()
config_dash.LOG.info("Reading the segment number {} from the buffer at playtime {}".format(
play_segment['segment_number'], self.playback_timer.time()))
self.log_entry(action="StillPlaying", bitrate=play_segment["bitrate"])
# Calculate time playback when the segment finishes
future = self.playback_timer.time() + play_segment['playback_length']
# Start the playback
self.playback_timer.start()
                flag = 0
while self.playback_timer.time() < future:
# If playback hasn't started yet, set the playback_start_time
if not self.playback_start_time:
self.playback_start_time = time.time()
config_dash.LOG.info("Started playing with representation {} at {}".format(
play_segment['bitrate'], self.playback_timer.time()))
                    flag += 1
                    if flag == 1:
                        self.do_request = True
# Duration for which the video was played in seconds (integer)
if self.playback_timer.time() >= self.playback_duration:
config_dash.LOG.info("Completed the video playback: {} seconds".format(
self.playback_duration))
self.playback_timer.pause()
self.set_state("END")
self.log_entry("TheEnd")
return
else:
self.buffer_length_lock.acquire()
self.buffer_length -= int(play_segment['playback_length'])
config_dash.LOG.debug("Decrementing buffer_length by {}. dash_buffer = {}".format(
play_segment['playback_length'], self.buffer_length))
self.buffer_length_lock.release()
if self.segment_limit:
if int(play_segment['segment_number']) >= self.segment_limit:
self.set_state("STOP")
config_dash.LOG.info("Stopped playback after segment {} at playtime {}".format(
play_segment['segment_number'], self.playback_duration))
def write(self, segment):
""" write segment to the buffer.
Segment is dict with keys ['data', 'bitrate', 'playback_length', 'URI', 'size']
"""
# Acquire Lock on the buffer and add a segment to it
if not self.actual_start_time:
self.actual_start_time = time.time()
config_dash.JSON_HANDLE['playback_info']['start_time'] = self.actual_start_time
config_dash.LOG.info("Writing segment {} at time {}".format(segment['segment_number'],
time.time() - self.actual_start_time))
self.buffer_lock.acquire()
self.buffer.put(segment)
self.buffer_lock.release()
self.buffer_length_lock.acquire()
self.buffer_length += int(segment['playback_length'])
config_dash.LOG.debug("Incrementing buffer_length by {}. dash_buffer = {}".format(
segment['playback_length'], self.buffer_length))
self.buffer_length_lock.release()
self.log_entry(action="Writing", bitrate=segment['bitrate'])
def start(self):
""" Start playback"""
self.set_state("INITIAL_BUFFERING")
self.log_entry("Starting")
config_dash.LOG.info("Starting the Player")
self.player_thread = threading.Thread(target=self.initialize_player)
self.player_thread.daemon = True
self.player_thread.start()
self.log_entry(action="Starting")
def stop(self):
"""Method to stop the playback"""
self.set_state("STOP")
self.log_entry("Stopped")
config_dash.LOG.info("Stopped the playback")
def log_entry(self, action, bitrate=0):
"""Method to log the current state"""
if self.buffer_log_file:
header_row = None
if self.actual_start_time:
log_time = time.time() - self.actual_start_time
else:
log_time = 0
if not os.path.exists(self.buffer_log_file):
header_row = "EpochTime,CurrentPlaybackTime,CurrentBufferSize,CurrentPlaybackState,Action,Bitrate".split(",")
stats = (log_time, str(self.playback_timer.time()), self.buffer.qsize(),
self.playback_state, action,bitrate)
else:
stats = (log_time, str(self.playback_timer.time()), self.buffer.qsize(),
self.playback_state, action,bitrate)
str_stats = [str(i) for i in stats]
with open(self.buffer_log_file, "a") as log_file_handle: # it should be "ab" not "a" # Jerry
result_writer = csv.writer(log_file_handle, delimiter=",")
if header_row:
result_writer.writerow(header_row)
result_writer.writerow(str_stats)
config_dash.LOG.info("BufferStats: EpochTime=%s,CurrentPlaybackTime=%s,CurrentBufferSize=%s,"
"CurrentPlaybackState=%s,Action=%s,Bitrate=%s" % tuple(str_stats))
|
bus.py
|
from abc import abstractmethod
from asyncio.tasks import Task
from configparser import ConfigParser
import threading
import asyncio
import queue
import os
import time
import sys
from functools import reduce
from random import getrandbits, randrange
from typing import Any, Callable, Coroutine, Optional, Tuple, TypeVar, Union, cast, List, Dict
from textwrap import wrap
from os import path
from hashlib import sha1
from .constants import *
from .logger.constants import *
from .control.constants import *
from .system.constants import *
from .role_manager.constants import *
from .unique_brain.constants import *
from .packet import *
from .transport import Transport
from .util import now, log, logv
from .pack import PackTuple, PackType, jdpack, jdunpack
EV_CHANGE = "change"
EV_DEVICE_CONNECT = "deviceConnect"
EV_DEVICE_CHANGE = "deviceChange"
EV_DEVICE_ANNOUNCE = "deviceAnnounce"
EV_SELF_ANNOUNCE = "selfAnnounce"
EV_PACKET_PROCESS = "packetProcess"
EV_REPORT_RECEIVE = "reportReceive"
EV_REPORT_UPDATE = "reportUpdate"
EV_RESTART = "restart"
EV_PACKET_RECEIVE = "packetReceive"
EV_EVENT = "packetEvent"
EV_STATUS_EVENT = "statusEvent"
EV_IDENTIFY = "identify"
EV_CONNECTED = "connected"
EV_DISCONNECTED = "disconnected"
# _ACK_RETRIES = const(4)
# _ACK_DELAY = const(40)
RegType = TypeVar('RegType', bound=Union[PackType, PackTuple])
HandlerFn = Callable[..., Union[None, Coroutine[Any, Any, None]]]
EventHandlerFn = Callable[..., None]
UnsubscribeFn = Callable[..., None]
class EventEmitter:
def __init__(self, bus: 'Bus') -> None:
self.bus = bus
def emit(self, id: str, *args: object):
def add_cb(fn: HandlerFn):
def cb():
r = fn(*args)
if r is None:
return
# print(r)
t = self.bus.loop.create_task(r)
self.bus.pending_tasks.append(t)
# print(t)
self.bus.loop.call_soon(cb)
self.bus.force_jd_thread()
if not hasattr(self, "_listeners"):
return
idx = 0
while idx < len(self._listeners):
lid, fn, once = self._listeners[idx]
if lid == id:
# note that add_cb() can't be inlined here due to lack of block scope in Python
add_cb(fn)
if once:
del self._listeners[idx]
idx -= 1
idx += 1
def _init_emitter(self):
if not hasattr(self, "_listeners"):
self._listeners: List[Tuple[str, HandlerFn, bool]] = []
def on(self, id: str, fn: HandlerFn) -> UnsubscribeFn:
"""Subscribes an event to a handler. Returns a callback to unsubscribe.
Args:
id (str): event identifier
fn (HandlerFn): event callback
Returns: callback to unsubscribe
"""
self._init_emitter()
self._listeners.append((id, fn, False))
def unsubscribe():
return self.off(id, fn)
return unsubscribe
def once(self, id: str, fn: HandlerFn):
"""Subscribes an event to run once; then get unsubscribed
Args:
id (str): event identifier
fn (HandlerFn): event callback
"""
self._init_emitter()
self._listeners.append((id, fn, True))
def off(self, id: str, fn: HandlerFn):
"""Unsubscribes a handler from an event
Args:
id (str): event identifier
fn (HandlerFn): event callback
"""
self._init_emitter()
for i in range(len(self._listeners)):
id2, fn2, _ign = self._listeners[i]
if id == id2 and fn is fn2:
del self._listeners[i]
return
raise ValueError("no matching on() for off()")
# usage: await x.event("...")
async def event(self, id: str):
f = self.bus.loop.create_future()
self.once(id, lambda: f.set_result(None))
await f
def wait_for(self, id: str):
self.bus.force_non_jd_thread()
cv = threading.Condition()
happened = False
def poke(*args: object):
nonlocal happened
with cv:
happened = True
cv.notify()
self.once(id, poke)
with cv:
while not happened:
cv.wait()
def _log_report_prefix(self) -> str:
return ""
def _add_log_report(self, priority: int, text: str, *args: object):
prefix = self._log_report_prefix()
msg = prefix + text
logger = self.bus.logger
if logger:
logger.report(priority, msg, *args)
def log(self, text: str, *args: object):
self._add_log_report(LoggerPriority.LOG, text, *args)
def warn(self, text: str, *args: object):
self._add_log_report(LoggerPriority.WARNING, text, *args)
def debug(self, text: str, *args: object):
self._add_log_report(LoggerPriority.DEBUG, text, *args)
def error(self, text: str, *args: object):
self._add_log_report(LoggerPriority.ERROR, text, *args)
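# Subscription pattern sketch (illustration only; a concrete EventEmitter needs a Bus with a
# running event loop, so this is not a standalone program):
#
#   unsubscribe = emitter.on(EV_CONNECTED, lambda: print("connected"))
#   emitter.once(EV_DISCONNECTED, lambda: print("fires at most once"))
#   unsubscribe()                             # detach the EV_CONNECTED handler again
#   await emitter.event(EV_REPORT_RECEIVE)    # inside a coroutine: wait for the next event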
def _service_matches(dev: 'Device', serv: bytearray):
ds = dev.services
if not ds or len(ds) != len(serv):
return False
for i in range(4, len(serv)):
if ds[i] != serv[i]:
return False
return True
def rand_u64():
return bytearray([getrandbits(8) for _ in range(8)])
def is_raspberrypi():
# https://raspberrypi.stackexchange.com/questions/5100/detect-that-a-python-program-is-running-on-the-pi
try:
import io
with io.open('/sys/firmware/devicetree/base/model', 'r') as m:
if 'raspberry pi' in m.read().lower(): return True
except Exception: pass
return False
class Bus(EventEmitter):
"""A Jacdac bus that managed devices, service client, registers."""
def __init__(self, *,
transports: Optional[List[Transport]] = None,
device_id: Optional[str] = None,
product_identifier: Optional[int] = None,
firmware_version: Optional[str] = None,
device_description: Optional[str] = None,
disable_logger: bool = False,
disable_role_manager: bool = False,
disable_settings: bool = False,
disable_brain: bool = False,
disable_dev_tools: bool = False,
spi: Optional[bool] = None,
hf2_portname: Optional[str] = None,
transport_cmd: Optional[str] = None,
default_logger_min_priority: Optional[int] = None,
storage_dir: Optional[str] = None
) -> None:
"""Creates a new Jacdac bus.
        All configuration options, aside from transports, can also be specified in ini configuration files ./jacdac.ini, ~/.jacdac/config.ini or setup.cfg.
Args:
transports (List[Transport]): packet transports
storage_dir (str): Optional settings directory where settings, roles are stored.
device_id (str, optional): Optional device identifier. Auto-generated if not specified.
product_identifier (int, optional): Optional product identifier.
device_description (str, optional): Optional device description.
disable_logger (bool, optional): Disable the logger service. Defaults to False.
disable_role_manager (bool, optional): Disable the role manager service. Defaults to False.
disable_settings (bool, optional): Disable the settings service. Defaults to False.
disable_brain (bool, optional): Disable unique brain service. Defaults to False.
default_logger_min_priority (int, optional): Optional minimum logger priority. Defaults to LoggerPriority.SILENT.
disable_dev_tools (bool, optional): Do not try to connect to developer tools server.
hf2_portname (str, optional): port name exposing HF2 packets.
transport_cmd (str, optional): name of executable to run as a transport.
spi (bool, optional): use SPI for transport. Enabled by default if Raspberry Pi and spi is None.
"""
super().__init__(self)
self.devices: List['Device'] = []
self.unattached_clients: List['Client'] = []
self.all_clients: List['Client'] = []
self.servers: List['Server'] = []
self.logger: Optional[LoggerServer] = None
self.role_manager: Optional[RoleManagerServer] = None
self.pipes: List['InPipe'] = []
self._event_counter = 0
# merge .ctor configuration with files
config = ConfigParser()
config.read(["./jacdac.ini", os.path.expanduser("~") + "/.jacdac/config.ini", "./setup.cfg"])
if not config.has_section("jacdac"):
config.add_section("jacdac")
cfg = config["jacdac"]
device_id = device_id or cfg.get(
"device_id", rand_u64().hex())
self.product_identifier: Optional[int] = product_identifier or cfg.getint(
"product_identifier", None)
self.firmware_version: Optional[str] = firmware_version or cfg.get(
"firmware_version", None)
self.device_description: Optional[str] = device_description
self.disable_brain = disable_brain or cfg.getboolean(
"disable_brain", False)
self.disable_logger = disable_logger or cfg.getboolean(
"disable_logger", False)
self.disable_settings = disable_settings or cfg.getboolean(
"disable_settings", False)
self.disable_dev_tools = disable_dev_tools or cfg.getboolean(
"disable_dev_tools", False)
self.disable_role_manager = disable_role_manager or cfg.getboolean(
"disable_role_manager", False)
self.default_logger_min_priority = default_logger_min_priority or cfg.getint(
"default_logger_min_priority", LoggerPriority.SILENT)
self.storage_dir = storage_dir or cfg.get("storage_dir", "./.jacdac")
self.hf2_portname = hf2_portname or cfg.get("hf2_portname")
self.transport_cmd = transport_cmd or cfg.get("transport_cmd")
# automatically turn on SPI transport on the Pi
if spi is None:
spi = is_raspberrypi()
self.spi = spi or cfg.getboolean("spi", None)
self.self_device = Device(self, device_id, bytearray(4))
self.process_thread = threading.Thread(target=self._process_task)
self.process_thread.daemon = True
self.transports: List[Transport] = transports or []
if not self.disable_dev_tools:
from .transports.ws import WebSocketTransport
self.transports.append(WebSocketTransport(DEVTOOLS_SOCKET_URL))
if self.transport_cmd:
from .transports.exec import ExecTransport
self.transports.append(ExecTransport(self.transport_cmd))
if self.hf2_portname:
from .transports.hf2 import HF2Transport
self.transports.append(HF2Transport(self.hf2_portname))
if self.spi:
from .transports.spi import SpiTransport
self.transports.append(SpiTransport())
self._sendq: queue.Queue[Tuple[Transport, bytes]] = queue.Queue()
self.pending_tasks: List[asyncio.Task[None]] = []
self.loop = asyncio.new_event_loop()
def handler(loop, context): # type: ignore
self.loop.default_exception_handler(context) # type: ignore
os._exit(10)
self.loop.set_exception_handler(handler) # type: ignore
self.sender_thread = threading.Thread(target=self._sender)
self.sender_thread.daemon = True
self.sender_thread.start()
# self.taskq.recurring(2000, self.debug_dump)
self.process_thread.start()
print("starting jacdac, self device {}".format(self.self_device))
def run(self, cb: Callable[..., None], *args: Any):
if self.process_thread is threading.current_thread():
cb(*args)
else:
self.loop.call_soon(cb, *args)
def _sender(self):
while True:
c = self._sendq.get()
sender = c[0]
pkt = c[1]
for transport in self.transports:
if sender != transport:
transport.send(pkt)
def _process_task(self):
loop = self.loop
asyncio.set_event_loop(loop)
# TODO: what's the best way to import these things
ctrls = ControlServer(self) # attach control server
if not self.disable_logger:
self.logger = LoggerServer(self)
if self.storage_dir and not self.disable_role_manager:
self.role_manager = RoleManagerServer(self)
if self.storage_dir and not self.disable_settings:
from .settings.server import SettingsServer
self.settings = SettingsServer(self)
if not self.disable_brain:
UniqueBrainServer(self)
def keep_task(t: 'asyncio.Task[None]'):
if t.done():
t.result() # throw exception if needed
return False
return True
def announce():
self.emit(EV_SELF_ANNOUNCE)
self._gc_devices()
ctrls.queue_announce()
self.pending_tasks = [
x for x in self.pending_tasks if keep_task(x)]
loop.call_later(0.500, announce)
loop.call_later(0.500, announce)
def process_later(sender: Transport, pkt: bytes):
loop.call_soon_threadsafe(self.process_frame, sender, pkt)
for transport in self.transports:
def process(pkt: bytes):
process_later(transport, pkt)
transport.on_receive = process
try:
loop.run_forever()
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
def process_frame(self, sender: Transport, frame: bytes):
if frame[2] - frame[12] < 4:
# single packet in frame
pkt = JDPacket(frombytes=frame, sender=sender)
self.process_packet(pkt)
# dispatch to other transports
self._queue_core(pkt)
else:
# split into frames
ptr = 12
while ptr < 12 + frame[2]:
sz = frame[ptr] + 4
pktbytes = frame[0:12] + frame[ptr:ptr+sz]
# log("PKT: {}-{} / {}", ptr, len(frame), pktbytes.hex())
pkt = JDPacket(frombytes=pktbytes, sender=sender)
if ptr > 12:
pkt.requires_ack = False # only ack once
self.process_packet(pkt)
# dispatch to other transports
self._queue_core(pkt)
ptr += (sz + 3) & ~3
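# Note on the framing handled above: bytes 0..11 of a frame form the header that is
# reused for every packet split out of it; frame[2] is the total payload length after
# that header and, starting at offset 12, each packet contributes frame[ptr] payload
# bytes plus a 4-byte packet header, padded to a 4-byte boundary ((sz + 3) & ~3).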
def force_jd_thread(self):
assert threading.current_thread() is self.process_thread
def force_non_jd_thread(self):
assert threading.current_thread() is not self.process_thread
def debug_dump(self):
print("Devices:")
for dev in self.devices:
info = dev.debug_info()
if dev is self.self_device:
info = "SELF: " + info
print(info)
print("END")
def lookup_server(self, service_class: int) -> Optional['Server']:
for s in self.servers:
if s.service_class == service_class:
return s
return None
def _gc_devices(self):
now_ = now()
cutoff = now_ - 2000
self.self_device.last_seen = now_ # make sure not to gc self
newdevs: List['Device'] = []
for dev in self.devices:
if dev.last_seen < cutoff:
dev._destroy()
else:
newdevs.append(dev)
if len(newdevs) != len(self.devices):
self.devices = newdevs
self.emit(EV_DEVICE_CHANGE)
self.emit(EV_CHANGE)
def _queue_core(self, pkt: JDPacket):
assert len(pkt._data) == pkt._header[12]
pkt._header[2] = len(pkt._data) + 4
buf = pkt._header + pkt._data
crc = util.crc16(buf, 2)
sender = cast(Transport, pkt.sender)
util.set_u16(buf, 0, crc)
util.set_u16(pkt._header, 0, crc)
self._sendq.put((sender, buf))
def _send_core(self, pkt: JDPacket):
self._queue_core(pkt)
self.process_packet(pkt) # handle loop-back packet
def clear_attach_cache(self):
self.debug("clear attach cache")
for d in self.devices:
# add a dummy byte at the end (if not done already), to force re-attach of services
if (len(d.services) & 3) == 0:
d.services.append(0)
def mk_event_cmd(self, ev_code: int):
if not self._event_counter:
self._event_counter = 0
self._event_counter = (self._event_counter +
1) & CMD_EVENT_COUNTER_MASK
assert (ev_code >> 8) == 0
return (
CMD_EVENT_MASK |
(self._event_counter << CMD_EVENT_COUNTER_POS) |
ev_code
)
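# Worked example of the encoding above, assuming CMD_EVENT_COUNTER_POS == 8 and a
# current counter value of 3: mk_event_cmd(0x01) yields CMD_EVENT_MASK | (3 << 8) | 0x01.
# The assert guarantees the event code itself fits in the low 8 bits.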
def _reattach(self, dev: 'Device'):
dev.last_seen = now()
self.debug("reattaching services to {}; {}/{} to attach", dev,
len(self.unattached_clients), len(self.all_clients))
new_clients: List['Client'] = []
occupied = bytearray(dev.num_service_classes)
for c in dev.clients:
if c.broadcast:
c._detach()
continue # will re-attach
assert c.service_index is not None
new_class = dev.service_class_at(c.service_index)
if new_class == c.service_class and dev.matches_role_at(c.role, c.service_index):
new_clients.append(c)
occupied[c.service_index] = 1
else:
c._detach()
dev.clients = new_clients
self.emit(EV_DEVICE_ANNOUNCE, dev)
if len(self.unattached_clients) == 0:
return
for i in range(1, dev.num_service_classes):
if occupied[i]:
continue
service_class = dev.service_class_at(i)
for cc in self.unattached_clients:
if cc.service_class == service_class:
if cc._attach(dev, i):
break
def process_packet(self, pkt: JDPacket):
logv("route: {}", pkt)
dev_id = pkt.device_id
multi_command_class = pkt.multicommand_class
service_index = pkt.service_index
# TODO implement send queue for packet compression
if pkt.requires_ack and pkt.device_id == self.self_device.device_id:
ack = JDPacket(cmd=pkt.crc)
ack.service_index = JD_SERVICE_INDEX_CRC_ACK
ack.device_id = self.self_device.device_id
self._send_core(ack)
self.emit(EV_PACKET_PROCESS, pkt)
if service_index == JD_SERVICE_INDEX_PIPE and pkt.device_id == self.self_device.device_id:
port = pkt.service_command >> PIPE_PORT_SHIFT
pipe = next(filter(lambda p: p.port == port, self.pipes), None)
if pipe:
pipe.handle_packet(pkt)
else:
self.debug("unknown pipe port {}", port)
return
if multi_command_class is not None:
if not pkt.is_command:
return # only commands supported in multi-command
for h in self.servers:
if h.service_class == multi_command_class:
# pretend it's directly addressed to us
pkt.device_id = self.self_device.device_id
pkt.service_index = h.service_index
h.handle_packet_outer(pkt)
elif dev_id == self.self_device.device_id and pkt.is_command:
h = self.servers[pkt.service_index]
if h:
# log(`handle pkt at ${h.name} cmd=${pkt.service_command}`)
h.handle_packet_outer(pkt)
else:
if pkt.is_command:
return # it's a command, and it's not for us
dev = None
try:
dev = next(d for d in self.devices if d.device_id == dev_id)
except:
pass
if (pkt.service_index == JD_SERVICE_INDEX_CTRL):
if (pkt.service_command == 0):
if (dev and dev.reset_count > (pkt.data[0] & 0xf)):
# if the reset counter went down, it means the device was reset;
# treat it as a new device
self.debug("device resetted")
self.devices.remove(dev)
dev._destroy()
dev = None
self.emit(EV_RESTART)
matches = False
if not dev:
dev = Device(self, pkt.device_id, pkt.data)
self.emit(EV_DEVICE_CONNECT, dev)
else:
matches = _service_matches(dev, pkt.data)
dev.services = pkt.data
if not matches:
self._reattach(dev)
if dev:
dev.process_packet(pkt)
return
elif (pkt.service_index == JD_SERVICE_INDEX_CRC_ACK):
# _got_ack(pkt)
pass
# we can't know the serviceClass,
# no announcement seen yet for this device
if not dev:
return
dev.process_packet(pkt)
def add_pipe(self, pipe: 'InPipe'):
port = randrange(1, 511)
while any(p.port == port for p in self.pipes):
port = randrange(1, 511)
pipe.port = port
self.pipes.append(pipe)
def remove_pipe(self, pipe: 'InPipe'):
if pipe in self.pipes:
self.pipes.remove(pipe)
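# Minimal construction sketch for the Bus above (all arguments are optional; the
# device id shown is illustrative only):
#
#   bus = Bus(device_id="0123456789abcdef")
#   bus.debug_dump()   # print the devices currently seen on the bus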
class InPipe(EventEmitter):
"""Incoming pipe"""
def __init__(self, bus: Bus):
super().__init__(bus)
self.bus = bus
self.port = -1
self.next_cnt = 0
self.closed = False
self.in_q: List[bytearray] = []
self.port = -1
self.bus.add_pipe(self)
def open_command(self, cmd: int):
return JDPacket.packed(cmd, "b[8] u16 u16", bytearray.fromhex(self.bus.self_device.device_id), self.port, 0)
def bytes_available(self) -> int:
return reduce(lambda x, y: x + len(y), self.in_q, 0)
def read(self) -> Optional[bytearray]:
while True:
if len(self.in_q):
return self.in_q.pop(0)
if self.closed:
return None
self.wait_for(EV_REPORT_RECEIVE)
def _close(self):
self.closed = True
self.bus.remove_pipe(self)
def close(self):
self._close()
self.in_q = []
def meta(self, pkt: JDPacket):
pass
def handle_packet(self, pkt: JDPacket):
cmd = pkt.service_command
if (cmd & PIPE_COUNTER_MASK) != (self.next_cnt & PIPE_COUNTER_MASK):
return
self.next_cnt += 1
if cmd & PIPE_CLOSE_MASK:
self._close()
if cmd & PIPE_METADATA_MASK:
self.meta(pkt)
else:
self.in_q.append(pkt.data)
self.emit(EV_REPORT_RECEIVE)
def read_list(self) -> List[bytearray]:
r: List[bytearray] = []
while True:
buf = self.read()
if not buf:
break
if len(buf):
r.append(buf)
return r
class OutPipe(EventEmitter):
"""Out going packet pipe"""
def __init__(self, bus: 'Bus', pkt: JDPacket) -> None:
super().__init__(bus)
[device_id_bytes, port] = pkt.unpack("b[8] u16")
self.device_id = cast(bytearray, device_id_bytes).hex()
self.port = cast(int, port)
self.next_cnt = 0
@ property
def open(self):
return not not self.port
def write_ex(self, buf: bytearray, flags: int):
if not self.port:
return
pkt = JDPacket(
cmd=(self.next_cnt & PIPE_COUNTER_MASK) |
(self.port << PIPE_PORT_SHIFT) |
flags,
data=buf
)
self.next_cnt += 1
if flags & PIPE_CLOSE_MASK:
self.port = None
pkt.service_index = JD_SERVICE_INDEX_PIPE
pkt.requires_ack = True
pkt.device_id = self.device_id
self.bus._send_core(pkt)
# TODO: check acks
# if not pkt._send_with_ack(self.device_id):
# self.port = None
# throw "out pipe error: no ACK"
def write(self, buf: bytearray):
self.write_ex(buf, 0)
def write_and_close(self, buf: bytearray):
self.write_ex(buf, PIPE_CLOSE_MASK)
def close(self):
self.write_and_close(bytearray(0))
def write_meta(self, buf: bytearray):
self.write_ex(buf, PIPE_METADATA_MASK)
class RawRegisterClient(EventEmitter):
"""A Jacdac register client
"""
def __init__(self, client: 'Client', code: int, pack_format: Optional[str]) -> None:
super().__init__(client.bus)
self.code = code
self._data: Optional[bytearray] = None
self._refreshed_at = 0
self.client = client
self.pack_format = pack_format
self.not_implemented = False
def clear(self):
self._data = None
self._refreshed_at = 0
self.not_implemented = False
def current(self, refresh_ms: int = -1):
if refresh_ms < 0 or self._refreshed_at + refresh_ms >= now():
return self._data
return None
def values(self) -> Optional[PackTuple]:
data = self.query_no_wait()
if data and self.pack_format:
return jdunpack(data, self.pack_format)
return None
def set_values(self, *args: PackType):
if self.pack_format is None:
raise RuntimeError("set_value not supported")
if not self.client.connected:
return
data = jdpack(self.pack_format, *args)
def send():
pkt = JDPacket(cmd=JD_SET(self.code), data=data)
self.client.send_cmd(pkt)
self.refresh()
self.bus.run(send)
def value(self, default_value: Any = None) -> Optional[Any]:
"""Extracts the value of the first field."""
values = self.values()
if values is None:
return default_value
else:
return values[0]
def bool_value(self, default_value: Optional[bool] = None) -> Optional[bool]:
"""Extracts the value of the first field as a boolean."""
value = self.value()
return bool(value) if not value is None else default_value
def float_value(self, default_value: Optional[float] = None, scale: int = 1) -> Optional[float]:
value = self.value()
return float(value) * scale if not value is None else default_value
def _query(self):
if not self.client.connected:
return
pkt = JDPacket(cmd=JD_GET(self.code))
self.client.send_cmd(pkt)
def refresh(self):
if not self.client.connected or self._refreshed_at < 0 or self.not_implemented:
return # already in progress
def do_refresh():
prev_data = self._data
self._refreshed_at = -1
def final_check():
if prev_data is self._data:
# if we still didn't get any data, emit "change" event, so that queries can time out
self._data = None
self._refreshed_at = 0
self.emit(EV_CHANGE)
def second_refresh():
if prev_data is self._data:
self._query()
self.bus.loop.call_later(0.100, final_check)
def first_refresh():
if prev_data is self._data:
self._query()
self.bus.loop.call_later(0.050, second_refresh)
self._query()
self.bus.loop.call_later(0.020, first_refresh)
self.bus.run(do_refresh)
# can't be called from event handlers!
def query(self, refresh_ms: int = 500):
if self.not_implemented:
return None
curr = self.current(refresh_ms)
if curr:
return curr
self.refresh()
self.wait_for(EV_CHANGE)
if self._data is None:
raise RuntimeError(
"Can't read reg #{} (from {})".format(hex(self.code), self.client))
return self._data
async def query_async(self, refresh_ms: int = 500):
if self.not_implemented:
return None
curr = self.current(refresh_ms)
if curr:
return curr
self.refresh()
# todo: test if changed
await self.event(EV_CHANGE)
if self._data is None:
raise RuntimeError(
"Can't read reg #{} (from {})".format(hex(self.code), self.client))
return self._data
def query_no_wait(self, refresh_ms: int = -1):
if self.not_implemented:
return None
curr = self.current(refresh_ms)
if curr:
return curr
self.refresh()
return self._data
def handle_packet(self, pkt: JDPacket):
if self.not_implemented:
return
if pkt.is_reg_get and pkt.reg_code == self.code:
self._data = pkt.data
self._refreshed_at = now()
self.emit(EV_CHANGE)
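# Illustrative register read/write sketch (the `client` instance is a placeholder;
# JD_REG_READING is a register code used elsewhere in this module):
#
#   reg = client.register(JD_REG_READING)
#   raw = reg.query(refresh_ms=500)       # blocking; raises if no answer arrives
#   value = reg.value(default_value=0)    # first unpacked field, if a pack format is known
#   reg.set_values(1)                     # packs and sends a SET command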
class Server(EventEmitter):
def __init__(self, bus: Bus, service_class: int, *, instance_name: str = None) -> None:
super().__init__(bus)
self.service_class = service_class
self.instance_name = instance_name
self.service_index = None
self._status_code = 0 # u16, u16
self.service_index = len(self.bus.servers)
self.bus.servers.append(self)
def status_code(self):
return self._status_code
def set_status_code(self, code: int, vendor_code: int):
c = ((code & 0xffff) << 16) | (vendor_code & 0xffff)
if c != self._status_code:
self._status_code = c
self.send_change_event()
def handle_packet_outer(self, pkt: JDPacket):
cmd = pkt.service_command
if cmd == JD_GET(JD_REG_STATUS_CODE):
self.handle_status_code(pkt)
elif cmd == JD_GET(JD_REG_INSTANCE_NAME):
self._handle_instance_name(pkt)
else:
# self.state_updated = False
self.handle_packet(pkt)
def handle_packet(self, pkt: JDPacket):
pass
def send_report(self, pkt: JDPacket):
pkt.service_index = self.service_index
pkt.device_id = self.bus.self_device.device_id
self.bus._send_core(pkt)
def send_event(self, event_code: int, data: bytes = None):
pkt = JDPacket(cmd=self.bus.mk_event_cmd(event_code), data=data)
def resend(): self.send_report(pkt)
def trisend():
resend()
self.bus.loop.call_later(0.020, resend)
self.bus.loop.call_later(0.100, resend)
self.bus.run(trisend)
def send_change_event(self):
self.send_event(JD_EV_CHANGE)
self.emit(EV_CHANGE)
def handle_status_code(self, pkt: JDPacket):
self.handle_reg_u32(pkt, JD_REG_STATUS_CODE, self._status_code)
def handle_reg_u8(self, pkt: JDPacket, register: int, current: int):
return self.handle_reg(pkt, register, "u8", current)
def handle_reg_u32(self, pkt: JDPacket, register: int, current: int):
return self.handle_reg(pkt, register, "u32", current)
def handle_reg_i32(self, pkt: JDPacket, register: int, current: int):
return self.handle_reg(pkt, register, "i32", current)
def handle_reg(self, pkt: JDPacket, register: int, fmt: str, current: RegType) -> RegType:
getset = pkt.service_command >> 12
if getset == 0 or getset > 2:
return current
reg = pkt.service_command & 0xfff
if reg != register:
return current
if getset == 1:
if isinstance(current, tuple):
self.send_report(JDPacket.packed(
pkt.service_command, fmt, *current))
else:
self.send_report(JDPacket.packed(
pkt.service_command, fmt, current))
else:
if register >> 8 == 0x1:
return current # read-only
v = pkt.unpack(fmt)
if not isinstance(current, tuple):
v = v[0]
if v != current:
self.state_updated = True
current = cast(RegType, v)
return current
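# The dispatch above relies on the register command layout: the top 4 bits of
# service_command select GET (1) or SET (2) and the low 12 bits carry the register
# code, e.g. a GET of register 0x101 arrives as service_command 0x1101.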
def _handle_instance_name(self, pkt: JDPacket):
self.send_report(JDPacket(cmd=pkt.service_command,
data=bytearray(self.instance_name or "", "utf-8")))
def _log_report_prefix(self) -> str:
return "{}.{}>".format(self.bus.self_device,
self.instance_name or self.service_index)
class SensorServer(Server):
def __init__(self, bus: Bus, service_class: int, streaming_interval: int, *, instance_name: str = None, streaming_preferred_interval: int = None) -> None:
super().__init__(bus, service_class, instance_name=instance_name)
self.streaming_samples: int = 0
self.streaming_preferred_interval: Optional[int] = streaming_preferred_interval
self.streaming_interval = streaming_interval
self._stream_task: Optional[Task[None]] = None
@ abstractmethod
def send_reading(self):
pass
def _start_streaming(self):
if self.streaming_samples > 0 and not self._stream_task:
self._stream_task = asyncio.ensure_future(self._stream())
async def _stream(self):
while(self.streaming_samples > 0):
self.streaming_samples = self.streaming_samples - 1
self.send_reading()
interval = max(20, self.streaming_interval)
await asyncio.sleep(interval / 1000)
self._stream_task = None
def handle_packet(self, pkt: JDPacket):
cmd = pkt.service_command
if cmd == JD_GET(JD_REG_STREAMING_SAMPLES) or cmd == JD_SET(JD_REG_STREAMING_SAMPLES):
self._handle_streaming_samples(pkt)
elif cmd == JD_GET(JD_REG_STREAMING_INTERVAL) or cmd == JD_SET(JD_REG_STREAMING_INTERVAL):
self._handle_streaming_interval(pkt)
elif cmd == JD_GET(JD_REG_STREAMING_PREFERRED_INTERVAL):
self._handle_streaming_preferred_interval(pkt)
super().handle_packet(pkt)
def _handle_streaming_samples(self, pkt: JDPacket):
self.streaming_samples = self.handle_reg_u8(pkt, JD_REG_STREAMING_SAMPLES,
self.streaming_samples)
self._start_streaming()
def _handle_streaming_interval(self, pkt: JDPacket):
self.streaming_interval = self.handle_reg_u32(
pkt, JD_REG_STREAMING_INTERVAL, self.streaming_interval)
def _handle_streaming_preferred_interval(self, pkt: JDPacket):
if self.streaming_preferred_interval:
self.handle_reg_u32(
pkt, JD_REG_STREAMING_PREFERRED_INTERVAL, self.streaming_preferred_interval)
else:
self.send_report(pkt.not_implemented())
class ControlServer(Server):
"""A server for the control service, used internally by the bus."""
def __init__(self, bus: Bus) -> None:
super().__init__(bus, JD_SERVICE_CLASS_CONTROL)
self.restart_counter = 0
self.auto_bind_cnt = 0
def queue_announce(self):
logv("announce: %d " % self.restart_counter)
self.restart_counter += 1
ids = [s.service_class for s in self.bus.servers]
rest = self.restart_counter
if rest > 0xf:
rest = 0xf
ids[0] = (
rest |
ControlAnnounceFlags.IS_CLIENT |
ControlAnnounceFlags.SUPPORTS_ACK |
ControlAnnounceFlags.SUPPORTS_BROADCAST |
ControlAnnounceFlags.SUPPORTS_FRAMES
)
buf = jdpack("u32[]", *ids)
self.send_report(JDPacket(cmd=0, data=buf))
# auto bind
if self.bus.role_manager and self.bus.role_manager.auto_bind:
self.auto_bind_cnt += 1
# also, only do it every two announces (TBD)
if self.auto_bind_cnt >= 2:
self.auto_bind_cnt = 0
self.bus.role_manager.bind_roles()
# def handle_flood_ping(self, pkt: JDPacket):
# num_responses, counter, size = pkt.unpack("IIB")
# payload = bytearray(4 + size)
# for i in range(size): payload[4+i]=i
# def queue_ping():
# if num_responses <= 0:
# control.internal_on_event(
# jacdac.__physId(),
# EVT_TX_EMPTY,
# do_nothing
# )
# else:
# payload.set_number(NumberFormat.UInt32LE, 0, counter)
# self.send_report(
# JDPacket.from(ControlCmd.FloodPing, payload)
# )
# num_responses--
# counter++
# control.internal_on_event(jacdac.__physId(), EVT_TX_EMPTY, queue_ping)
# queue_ping()
def handle_packet(self, pkt: JDPacket):
if pkt.is_reg_get:
reg_code = pkt.reg_code
if reg_code == JD_CONTROL_REG_UPTIME:
self.send_report(JDPacket.packed(
JD_GET(JD_CONTROL_REG_UPTIME), "u64", time.monotonic_ns() // 1000))
elif self.bus.product_identifier and reg_code == JD_CONTROL_REG_PRODUCT_IDENTIFIER:
self.send_report(JDPacket.packed(
JD_GET(JD_CONTROL_REG_PRODUCT_IDENTIFIER), "u32", self.bus.product_identifier))
elif self.bus.firmware_version and reg_code == JD_CONTROL_REG_FIRMWARE_VERSION:
self.send_report(JDPacket.packed(
JD_GET(JD_CONTROL_REG_PRODUCT_IDENTIFIER), "s", self.bus.firmware_version))
elif reg_code == JD_CONTROL_REG_DEVICE_DESCRIPTION:
descr = "{}, {}, {}, jacdac {}".format(
self.bus.device_description or "",
os.name, sys.platform, JD_VERSION)
self.send_report(JDPacket.packed(
JD_GET(JD_CONTROL_REG_DEVICE_DESCRIPTION), "s", descr))
else:
self.send_report(pkt.not_implemented())
else:
cmd = pkt.service_command
if cmd == JD_CONTROL_CMD_SERVICES:
self.queue_announce()
elif cmd == JD_CONTROL_CMD_IDENTIFY:
self.bus.emit(EV_IDENTIFY)
elif cmd == JD_CONTROL_CMD_RESET:
# TODO: reset support
raise RuntimeError("reset requested")
else:
self.send_report(pkt.not_implemented())
class LoggerServer(Server):
def __init__(self, bus: Bus) -> None:
super().__init__(bus, JD_SERVICE_CLASS_LOGGER)
self.min_priority = self.bus.default_logger_min_priority
self._last_listener_time = 0
def handle_packet(self, pkt: JDPacket):
self.min_priority = self.handle_reg_u8(
pkt, JD_LOGGER_REG_MIN_PRIORITY, self.min_priority)
cmd = pkt.service_command
if cmd == JD_SET(JD_LOGGER_REG_MIN_PRIORITY):
d = cast(int, pkt.unpack("u8")[0])
self._last_listener_time = now()
if d < self.min_priority:
self.min_priority = d
return super().handle_packet(pkt)
def report(self, priority: int, msg: str, *args: object):
if priority >= self.min_priority:
log(msg, *args)
cmd: int = -1
if priority == LoggerPriority.DEBUG:
cmd = JD_LOGGER_CMD_DEBUG
elif priority == LoggerPriority.LOG:
cmd = JD_LOGGER_CMD_LOG
elif priority == LoggerPriority.WARNING:
cmd = JD_LOGGER_CMD_WARN
elif priority == LoggerPriority.ERROR:
cmd = JD_LOGGER_CMD_ERROR
else:
return
if now() - self._last_listener_time > JD_LOGGER_LISTENER_TIMEOUT:
self._last_listener_time = 0
self.min_priority = self.bus.default_logger_min_priority
if not msg or not self._last_listener_time or priority < self.min_priority:
return
chunks = wrap(msg, JD_SERIAL_MAX_PAYLOAD_SIZE)
def send_chunks():
for chunk in chunks:
self.send_report(JDPacket.packed(cmd, "s", chunk))
self.bus.run(send_chunks)
class UniqueBrainServer(Server):
"""A server for the unique brain service, used internally by the bus"""
def __init__(self, bus: Bus) -> None:
super().__init__(bus, JD_SERVICE_CLASS_UNIQUE_BRAIN)
class DeviceWrapper:
def __init__(self, device: 'Device') -> None:
self.device = device
self.bindings: Dict[int, 'RoleBinding'] = {}
self.score = -1
class RoleBinding:
def __init__(self, role_manager: 'RoleManagerServer', role: str, service_class: int) -> None:
self.role_manager = role_manager
self.role = role
self.service_class = service_class
self.bound_to_dev: Optional[Device] = None
self.bound_to_service_idx: Optional[int] = None
def host(self) -> str:
slash_idx = self.role.find("/")
if slash_idx < 0:
return self.role
else:
return self.role[0: slash_idx]
def select(self, devwrap: DeviceWrapper, service_idx: int):
dev = devwrap.device
if dev == self.bound_to_dev and service_idx == self.bound_to_service_idx:
return
devwrap.bindings[service_idx] = self
self.role_manager.set_role(self.role, dev, service_idx)
self.bound_to_dev = dev
self.bound_to_service_idx = service_idx
class ServerBindings:
def __init__(self, host: str) -> None:
self.host = host
self.bindings: List[RoleBinding] = []
@property
def fully_bound(self) -> bool:
for binding in self.bindings:
if not binding.bound_to_dev:
return False
return True
def score_for(self, devwrap: DeviceWrapper, select: Optional[bool] = False):
"""candidate devices are ordered by [numBound, numPossible, device_id]
where numBound is number of clients already bound to this device
and numPossible is number of clients that can possibly be additionally bound
"""
num_bound = 0
num_possible = 0
dev = devwrap.device
missing: List[RoleBinding] = []
for b in self.bindings:
if b.bound_to_dev:
if b.bound_to_dev == dev:
num_bound += 1
else:
missing.append(b)
sbuf = dev.services
n = dev.num_service_classes
for service_index in range(1, n):
# if service is already bound to some client, move on
if service_index in devwrap.bindings:
continue
service_class = dev.service_class_at(service_index)
for i in range(len(missing)):
if missing[i].service_class == service_class:
# we've got a match!
num_possible += 1 # this can be assigned
# in fact, assign if requested
if select:
missing[i].select(devwrap, service_index)
# this one is no longer missing
missing.pop(i)
# move on to the next service in announce
break
# if nothing can be assigned, the score is zero
if num_possible == 0:
return 0
# otherwise the score is [numBound, numPossible], lexicographic
# numPossible can't be larger than ~64, leave it a few more bits
return (num_bound << 8) | num_possible
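# Worked example of the packed score above: with 2 clients already bound to the
# device and 3 more that could still be bound, the score is (2 << 8) | 3 == 0x203,
# so devices compare first by bound count and then by additional capacity.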
class RoleManagerServer(Server):
"""A server for the role manager service
"""
def __init__(self, bus: Bus) -> None:
super().__init__(bus, JD_SERVICE_CLASS_ROLE_MANAGER)
file_name = path.join(self.bus.storage_dir, "roles.json")
from jacdac.settings_file import SettingsFile
self.settings = SettingsFile(file_name)
self.auto_bind = 1
self._old_binding_hash = ""
def handle_packet(self, pkt: JDPacket):
self.auto_bind = self.handle_reg_u8(
pkt, JD_ROLE_MANAGER_REG_AUTO_BIND, self.auto_bind)
cmd = pkt.service_command
if cmd == JD_ROLE_MANAGER_CMD_LIST_ROLES:
self.handle_list_roles(pkt)
elif cmd == JD_ROLE_MANAGER_CMD_CLEAR_ALL_ROLES:
self.handle_clear_all_roles(pkt)
elif cmd == JD_ROLE_MANAGER_CMD_SET_ROLE:
self.handle_set_role(pkt)
elif cmd == JD_GET(JD_ROLE_MANAGER_REG_ALL_ROLES_ALLOCATED):
self.handle_all_roles_allocated(pkt)
else:
super().handle_packet(pkt)
def handle_list_roles(self, pkt: JDPacket):
pipe = OutPipe(self.bus, pkt)
for client in self.bus.all_clients:
device_id = bytearray(0)
service_class = client.service_class
service_index = client.service_index or 0
role = client.role
if client.device:
device_id = bytearray.fromhex(client.device.device_id)
payload = jdpack("b[8] u32 u8 s", device_id,
service_class, service_index, role)
pipe.write(bytearray(payload))
pipe.close()
def handle_all_roles_allocated(self, pkt: JDPacket):
res = 1
for client in self.bus.all_clients:
if not client.broadcast and not client.device:
res = 0
break
self.send_report(JDPacket.packed(pkt.service_command, "u8", res))
def handle_clear_all_roles(self, pkt: JDPacket):
self.settings.clear()
self.bus.clear_attach_cache()
self.bind_roles()
def handle_set_role(self, pkt: JDPacket):
payload = pkt.unpack("b[8] u8 s")
role = cast(str, payload[2])
if role:
self.settings.write(role, pkt.data)
self.bus.clear_attach_cache()
self.bind_roles()
def set_role(self, role: str, device: 'Device', service_idx: int):
key = "{}:{}".format(device.device_id, service_idx)
if device:
self.settings.write(key, bytearray(jdpack("s", role)))
else:
self.settings.delete(key)
self.bus.clear_attach_cache()
def is_match_role(self, role: str, device: 'Device', service_idx: int):
key = "{}:{}".format(device.device_id, service_idx)
current = self.settings.read(key)
stored_role = jdunpack(current, "s")[0] if current else None
return role == stored_role
def _binding_hash(self):
r = ""
for client in self.bus.all_clients:
r += "{}:{}:{},".format(client.role,
client.broadcast or client.device, client.service_index)
return sha1(r.encode("utf-8")).hexdigest()
def _check_changes(self):
new_hash = self._binding_hash()
if self._old_binding_hash != new_hash:
self._old_binding_hash = new_hash
self.bus.clear_attach_cache()
self.send_change_event()
def bind_roles(self):
if len(self.bus.unattached_clients) == 0:
self._check_changes()
return
self.debug("bind roles, {}/{} to bind",
len(self.bus.unattached_clients), len(self.bus.all_clients))
bindings: List[RoleBinding] = []
wraps: List[DeviceWrapper] = []
for device in self.bus.devices:
wraps.append(DeviceWrapper(device))
for cl in self.bus.all_clients:
if not cl.broadcast and cl.role:
b = RoleBinding(self, cl.role, cl.service_class)
if cl.device:
b.bound_to_dev = cl.device
b.bound_to_service_idx = cl.service_index
for w in wraps:
if w.device == cl.device and not cl.service_index is None:
w.bindings[cl.service_index] = b
break
bindings.append(b)
servers: List[ServerBindings] = []
# Group all clients by host
for b in bindings:
hn = b.host()
h: Optional[ServerBindings] = None
for server in servers:
if server.host == hn:
h = server
break
if not h:
h = ServerBindings(hn)
servers.append(h)
h.bindings.append(b)
# exclude hosts that have already everything bound
servers = list(filter(lambda h: not h.fully_bound, servers))
self.debug("servers not fully bound: {}", len(servers))
while len(servers) > 0:
# Get host with maximum number of clients (resolve ties by name)
# This gives priority to assignment of "more complicated" hosts, which are generally more difficult to assign
h = servers[0]
for i in range(1, len(servers)):
a = h
b = servers[i]
clen = len(a.bindings) - len(b.bindings)
if clen < 0 or (clen == 0 and b.host < a.host):
h = b
for d in wraps:
d.score = h.score_for(d)
dev = wraps[0]
for i in range(1, len(wraps)):
a = dev
b = wraps[i]
cscore = a.score - b.score
if cscore < 0 or (cscore == 0 and b.device.device_id < a.device.device_id):
dev = b
self.debug("binding: server {}, device {}, score {}",
h.host, dev.device.short_id, dev.score)
self.debug(" score: {}", ", ".join(
list(map(lambda w: "{}: {}".format(w.device.short_id, w.score), wraps))))
if dev.score == 0:
# nothing can be assigned, on any device
self.debug(" server not bound")
servers.remove(h)
continue
# assign services in order of names - this way foo/servo1 will be assigned before foo/servo2
# in list of advertised services
h.bindings = sorted(h.bindings, key=lambda entry: entry.role)
# "recompute" score, assigning names in process
h.score_for(dev, True)
# if everything bound on this host, remove it from further consideration
if h.fully_bound:
self.debug(" server bound")
servers.remove(h)
else:
# otherwise, remove bindings on the current device, to update sort order
# it's unclear we need this
h.bindings = list(
filter(lambda b: b.bound_to_dev != dev.device, h.bindings))
self.debug(" server {} bindings", len(h.bindings))
# trigger event as needed
self._check_changes()
class Client(EventEmitter):
"""Base class to define service clients."""
def __init__(self, bus: Bus, service_class: int, pack_formats: Dict[int, str], role: str) -> None:
super().__init__(bus)
self.broadcast = False
self.service_class = service_class
self.pack_formats = pack_formats
self.service_index = None
self.device: Optional['Device'] = None
self.role = role
self._registers: List[RawRegisterClient] = []
bus.unattached_clients.append(self)
bus.all_clients.append(self)
def __str__(self) -> str:
return "<Client '{}' srv:{} bnd:{}/{}>".format(
self.role, util.hex_num(self.service_class),
self.device and self.device.short_id, self.service_index)
def _lookup_register(self, code: int):
for reg in self._registers:
if reg.code == code:
return reg
return None
@ property
def connected(self) -> bool:
"""Indicates if the client is a connected to a server"""
return True if self.device else False
def register(self, code: int):
"""Retreives the register by code"""
r = self._lookup_register(code)
if r is None:
pack_format = self._lookup_packformat(code)
r = RawRegisterClient(self, code, pack_format)
self._registers.append(r)
return r
def _lookup_packformat(self, code: int) -> Optional[str]:
if code in self.pack_formats:
return self.pack_formats[code]
return None
def handle_packet(self, pkt: JDPacket):
pass
def handle_packet_outer(self, pkt: JDPacket):
if pkt.is_reg_get:
r = self._lookup_register(pkt.reg_code)
if r is not None:
r.handle_packet(pkt)
if pkt.is_event:
self.emit(EV_EVENT, pkt)
self.handle_packet(pkt)
def send_cmd(self, pkt: JDPacket):
"""Sends a command packet to the server"""
if self.device is None:
return
pkt.service_index = self.service_index
pkt.device_id = self.device.device_id
pkt._header[3] |= JD_FRAME_FLAG_COMMAND
self.bus.run(self.bus._send_core, pkt)
def send_cmd_packed(self, cmd: int, *args: PackType):
if args is None:
pkt = JDPacket(cmd=cmd)
else:
if not cmd in self.pack_formats:
raise RuntimeError("unknown data format")
fmt = self.pack_formats[cmd]
data = jdpack(fmt, *args)
pkt = JDPacket(cmd=cmd, data=data)
self.send_cmd(pkt)
def _attach(self, dev: 'Device', service_idx: int):
assert self.device is None
if not self.broadcast:
if not dev.matches_role_at(self.role, service_idx):
return False
self.device = dev
self.service_index = service_idx
self.bus.unattached_clients.remove(self)
self.debug("attached {}/{} to client {}", dev, service_idx, self.role)
dev.clients.append(self)
self.emit(EV_CONNECTED)
if self.bus.role_manager:
self.bus.role_manager.send_change_event()
return True
def _detach(self):
self.debug("detached")
self.service_index = None
if not self.broadcast:
assert self.device
self.device = None
for reg in self._registers:
reg.clear()
self.bus.unattached_clients.append(self)
self.bus.clear_attach_cache()
self.emit(EV_DISCONNECTED)
if self.bus.role_manager:
self.bus.role_manager.send_change_event()
def on_connect(self, handler: EventHandlerFn) -> UnsubscribeFn:
"""Registers an event handler when the client connects to a server
Args:
handler (EventHandlerFn): function to run when the client connects
Returns:
UnsubscribeFn: function to call to unregister handler
"""
return self.on(EV_CONNECTED, handler)
def on_disconnect(self, handler: EventHandlerFn) -> UnsubscribeFn:
"""Registers an event handler when the client disconnects from a server
Args:
handler (EventHandlerFn): function to run when the client disconnects
Returns:
UnsubscribeFn: function to call to unregister handler
"""
return self.on(EV_DISCONNECTED, handler)
def on_event(self, code: int, handler: EventHandlerFn) -> UnsubscribeFn:
"""Registers an event handler for the given event code
Args:
code (int): event identifier code
handler (EventHandlerFn): function to run with decoded event data and packet
Returns:
UnsubscribeFn: function to call to unregister handler
"""
if code in self.pack_formats:
fmt = self.pack_formats[code]
else:
fmt = None
def cb(pkt: JDPacket) -> None:
if pkt.event_code == code:
if fmt is None:
data = []
else:
data = jdunpack(pkt.data, fmt)
handler(data)
return self.on(EV_EVENT, cb)
def _log_report_prefix(self) -> str:
return "{}:{}>".format(self.bus.self_device, self.role)
class SensorClient(Client):
"""A client for a sensor service"""
def __init__(self, bus: Bus, service_class: int, pack_formats: Dict[int, str], role: str, *, preferred_interval: int = None) -> None:
super().__init__(bus, service_class, pack_formats, role)
self.preferred_interval = preferred_interval
@ property
def streaming_samples(self) -> Optional[int]:
"""Queries the current estimated streaming samples value"""
return self.register(JD_REG_STREAMING_SAMPLES).value()
@ property
def streaming_interval(self) -> Optional[int]:
return self.register(JD_REG_STREAMING_INTERVAL).value()
@ property
def streaming_preferred_interval(self) -> Optional[int]:
return self.register(JD_REG_STREAMING_PREFERRED_INTERVAL).value()
def refresh_reading(self) -> None:
if self._should_refresh_streaming_samples():
self.register(JD_REG_STREAMING_SAMPLES).set_values(0xff)
def _lookup_packformat(self, code: int) -> Optional[str]:
if code == JD_REG_STREAMING_SAMPLES:
return "u8"
elif code == JD_REG_STREAMING_INTERVAL:
return "u32"
elif code == JD_REG_STREAMING_PREFERRED_INTERVAL:
return "u32"
return Client._lookup_packformat(self, code)
def _should_refresh_streaming_samples(self) -> bool:
readingReg = self.register(JD_REG_READING)
if readingReg._refreshed_at < 0:
# refresh in progress
return False
samplesReg = self.register(JD_REG_STREAMING_SAMPLES)
if samplesReg._refreshed_at < 0:
# refresh in progress
return False
samples = samplesReg.value()
# check if we have some value
MIN_SAMPLES = 16
if samples is None or samples < MIN_SAMPLES:
return True
interval = self._reading_interval()
# haven't seen samples in a while
if now() > readingReg._refreshed_at + 3 * interval:
return True
# check if the streaming is consumed
if now() > samplesReg._refreshed_at + max(0, samples - MIN_SAMPLES) * interval:
return True
return False
def _reading_interval(self) -> int:
"""Resolves the best refresh interval for streaming"""
interval = self.streaming_interval
if interval:
return interval
interval = self.streaming_preferred_interval
if interval:
return interval
if self.preferred_interval:
return self.preferred_interval
return JD_STREAMING_DEFAULT_INTERVAL
class Device(EventEmitter):
"""A device on the bus"""
def __init__(self, bus: Bus, device_id: str, services: bytearray) -> None:
super().__init__(bus)
self.device_id = device_id
self.services = services
self.clients: List[Client] = []
self.last_seen = now()
self._event_counter: Optional[int] = None
self._ctrl_client: Optional[Client] = None
bus.devices.append(self)
@ property
def ctrl_client(self):
if self._ctrl_client is None:
self._ctrl_client = Client(
self.bus, JD_SERVICE_CLASS_CONTROL, JD_CONTROL_PACK_FORMATS, "")
self._ctrl_client._attach(self, 0)
return self._ctrl_client
@ property
def announce_flags(self):
return util.u16(self.services, 0)
@ property
def reset_count(self):
return self.announce_flags & ControlAnnounceFlags.RESTART_COUNTER_STEADY
@ property
def packet_count(self):
return self.services[2]
@ property
def is_connected(self):
return len(self.clients) != 0
@ property
def short_id(self):
return util.short_id(self.device_id)
def __str__(self) -> str:
return self.short_id
def debug_info(self):
r = "Device: " + self.short_id + "; "
for i in range(self.num_service_classes):
s = self.service_class_at(i)
assert s is not None
r += util.hex_num(s) + ", "
return r
def service_class_at(self, idx: int):
if idx == 0:
return 0
if idx < 0 or idx >= self.num_service_classes:
return None
return util.u32(self.services, idx << 2)
def matches_role_at(self, role: str, service_idx: int):
if not role or role == self.device_id or role == "{}:{}".format(self.device_id, service_idx):
return True
# requires role binding
if role.find(":") > -1:
return False
role_manager = self.bus.role_manager
if not role_manager:
return False
return role_manager.is_match_role(role, self, service_idx)
@ property
def num_service_classes(self):
return len(self.services) >> 2
def _destroy(self):
self.debug("destroy")
for c in self.clients:
c._detach()
self.clients = []
def _log_report_prefix(self) -> str:
return "{}>".format(self.short_id)
def process_packet(self, pkt: JDPacket):
self.last_seen = now()
self.emit(EV_PACKET_RECEIVE, pkt)
if pkt.service_command == JD_CMD_COMMAND_NOT_IMPLEMENTED:
cmd = util.u16(pkt.data, 0)
if cmd >> 12 == CMD_GET_REG >> 12:
reg_code = cmd & CMD_REG_MASK
srv_index = pkt.service_index
for c in self.clients:
if c.service_index == srv_index:
c.register(reg_code).not_implemented = True
break
return
service_class = self.service_class_at(pkt.service_index)
if not service_class or service_class == 0xffffffff:
return
if pkt.is_event:
ec = self._event_counter
if ec is None:
ec = pkt.event_counter - 1
ec += 1
# how many packets ahead and behind current are we?
ahead = (pkt.event_counter - ec) & CMD_EVENT_COUNTER_MASK
behind = (ec - pkt.event_counter) & CMD_EVENT_COUNTER_MASK
# ahead == behind == 0 is the usual case, otherwise
# behind < 60 means self is an old event (or retransmission of something we already processed)
# ahead < 5 means we missed at most 5 events, so we ignore self one and rely on retransmission
# of the missed events, and then eventually the current event
if ahead > 0 and (behind < 60 or ahead < 5):
return
# we got our event
self.emit(EV_EVENT, pkt)
self.bus.emit(EV_EVENT, pkt)
self._event_counter = pkt.event_counter
for c in self.clients:
if (c.broadcast and c.service_class == service_class) or \
(not c.broadcast and c.service_index == pkt.service_index):
# log(`handle pkt at ${client.role} rep=${pkt.serviceCommand}`)
c.device = self
c.handle_packet_outer(pkt)
class BufferClient(Client):
"""
A client that maintains a double-buffered bytes value
"""
_value: bytearray
_dirty: bool
def __init__(self, bus: Bus, service_class: int, pack_formats: Dict[int, str], role: str) -> None:
super().__init__(bus, service_class, pack_formats, role)
self._value = bytearray(0)
self._dirty = False
@property
def value(self) -> bytearray:
"""
Cached reading value
"""
return self._value
@value.setter
def value(self, v: bytearray) -> None:
# TODO: check for equality
self._value = v or bytearray(0)
self._dirty = True
# TODO: debounce
self.refresh_value()
@property
def dirty(self) -> bool:
return self._dirty
def set_dirty(self) -> None:
self._dirty = True
def refresh_value(self) -> None:
if self._dirty:
self.register(JD_REG_VALUE).set_values(self._value)
self._dirty = False
def update_value_length(self, length: Optional[int]) -> None:
l = len(self._value)
if (not length is None) and l != length:
# harmonize lengths
if length > l:
self._value = self._value + bytearray(length - l)
self._dirty = True
else:
self._value = self._value[0:length]
self._dirty = True
|
builder.py
|
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PWIL agent implementation, using JAX."""
import threading
from typing import Callable, Generic, Iterator, List, Optional, Sequence
from acme import adders
from acme import core
from acme import specs
from acme import types
from acme.agents.jax import builders
from acme.agents.jax.pwil import adder as pwil_adder
from acme.agents.jax.pwil import config as pwil_config
from acme.agents.jax.pwil import rewarder
from acme.jax import networks as networks_lib
from acme.jax.imitation_learning_types import DirectPolicyNetwork, DirectRLNetworks # pylint: disable=g-multiple-import
from acme.jax.types import PRNGKey
from acme.utils import counting
from acme.utils import loggers
import dm_env
import numpy as np
import reverb
def _prefill_with_demonstrations(adder: adders.Adder,
demonstrations: Sequence[types.Transition],
reward: Optional[float],
min_num_transitions: int = 0) -> None:
"""Fill the adder's replay buffer with expert transitions.
Assumes that the demonstrations dataset stores transitions in order.
Args:
adder: the agent which adds the demonstrations.
demonstrations: the expert demonstrations to iterate over.
reward: if non-None, populates the environment reward entry of transitions.
min_num_transitions: the lower bound on transitions processed, the dataset
will be iterated over multiple times if needed. Once at least
min_num_transitions are added, the processing is interrupted at the
nearest episode end.
"""
if not demonstrations:
return
reward = np.float32(reward) if reward is not None else reward
remaining_transitions = min_num_transitions
step_type = None
action = None
ts = dm_env.TimeStep(None, None, None, None) # Unused.
while remaining_transitions > 0:
# In case we share the adder or demonstrations don't end with
# end-of-episode, reset the adder prior to add_first.
adder.reset()
for transition_num, transition in enumerate(demonstrations):
remaining_transitions -= 1
discount = np.float32(1.0)
ts_reward = reward if reward is not None else transition.reward
if step_type == dm_env.StepType.LAST or transition_num == 0:
ts = dm_env.TimeStep(dm_env.StepType.FIRST, ts_reward, discount,
transition.observation)
adder.add_first(ts)
observation = transition.next_observation
action = transition.action
if transition.discount == 0. or transition_num == len(demonstrations) - 1:
step_type = dm_env.StepType.LAST
discount = np.float32(0.0)
else:
step_type = dm_env.StepType.MID
ts = dm_env.TimeStep(step_type, ts_reward, discount, observation)
adder.add(action, ts)
if remaining_transitions <= 0:
# Note: we could check `step_type == dm_env.StepType.LAST` to stop at an
# episode end if possible.
break
# Explicitly finalize the Reverb client writes.
adder.reset()
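# Illustrative call sketch (the adder and transition list are placeholders supplied
# by the surrounding agent code):
#
#   _prefill_with_demonstrations(adder=my_adder,
#                                demonstrations=list(expert_transitions),
#                                reward=0.0,
#                                min_num_transitions=1000)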
class PWILBuilder(builders.ActorLearnerBuilder[DirectRLNetworks,
DirectPolicyNetwork,
reverb.ReplaySample],
Generic[DirectRLNetworks, DirectPolicyNetwork]):
"""PWIL Agent builder."""
def __init__(self,
rl_agent: builders.ActorLearnerBuilder[DirectRLNetworks,
DirectPolicyNetwork,
reverb.ReplaySample],
config: pwil_config.PWILConfig,
demonstrations_fn: Callable[[], pwil_config.PWILDemonstrations]):
"""Initialize the agent.
Args:
rl_agent: the standard RL algorithm.
config: PWIL-specific configuration.
demonstrations_fn: A function that returns an iterator over contiguous
demonstration transitions, and the average demonstration episode length.
"""
self._rl_agent = rl_agent
self._config = config
self._demonstrations_fn = demonstrations_fn
super().__init__()
def make_learner(
self,
random_key: networks_lib.PRNGKey,
networks: DirectRLNetworks,
dataset: Iterator[reverb.ReplaySample],
logger: loggers.Logger,
replay_client: Optional[reverb.Client] = None,
counter: Optional[counting.Counter] = None,
) -> core.Learner:
return self._rl_agent.make_learner(
random_key=random_key,
networks=networks,
dataset=dataset,
logger=logger,
replay_client=replay_client,
counter=counter)
def make_replay_tables(
self, environment_spec: specs.EnvironmentSpec) -> List[reverb.Table]:
return self._rl_agent.make_replay_tables(environment_spec)
def make_dataset_iterator(
self,
replay_client: reverb.Client) -> Optional[Iterator[reverb.ReplaySample]]:
# make_dataset_iterator is only called once (per learner), to pass the
# iterator to make_learner. By using adders we ensure the transition types
# (e.g. n-step transitions) that the direct RL agent expects.
if self._config.num_transitions_rb > 0:
def prefill_thread():
# Populating the replay buffer with the direct RL agent guarantees that
# a constant reward will be used, not the imitation reward.
prefill_reward = (
self._config.alpha
if self._config.prefill_constant_reward else None)
_prefill_with_demonstrations(
adder=self._rl_agent.make_adder(replay_client),
demonstrations=list(self._demonstrations_fn().demonstrations),
min_num_transitions=self._config.num_transitions_rb,
reward=prefill_reward)
# Populate the replay buffer in a separate thread, so that the learner
# can sample from the buffer, to avoid blocking on the buffer being full.
threading.Thread(target=prefill_thread, daemon=True).start()
return self._rl_agent.make_dataset_iterator(replay_client)
def make_adder(self, replay_client: reverb.Client) -> adders.Adder:
"""Creates the adder substituting imitation reward."""
pwil_demonstrations = self._demonstrations_fn()
return pwil_adder.PWILAdder(
direct_rl_adder=self._rl_agent.make_adder(replay_client),
pwil_rewarder=rewarder.WassersteinDistanceRewarder(
demonstrations_it=pwil_demonstrations.demonstrations,
episode_length=pwil_demonstrations.episode_length,
use_actions_for_distance=self._config.use_actions_for_distance,
alpha=self._config.alpha,
beta=self._config.beta))
def make_actor(
self,
random_key: PRNGKey,
policy_network: DirectPolicyNetwork,
adder: Optional[adders.Adder] = None,
variable_source: Optional[core.VariableSource] = None,
) -> core.Actor:
return self._rl_agent.make_actor(random_key, policy_network, adder,
variable_source)
|
Computation.py
|
#!/usr/bin/python
import random
import time
import sys
import zmq
from mpi4py import MPI
from multiprocessing import Process, Value
def sensor_reading(port, sensor):
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:" + str(port.value))
while True:
message = socket.recv()
socket.send(time.ctime() + " Value:" + str(sensor.value))
if __name__ == "__main__":
# Sensor data structure
sensor = Value('i', 0)
port = Value('i', int(sys.argv[1]))
# MPI initialization
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Monitoring process creation
p = None
if rank == 0:
p = Process(target=sensor_reading, args=(port,sensor))
p.start()
# Perform computation
for i in range(10):
value = random.randint(0,100)
data = comm.gather(value, root = 0)
if rank == 0:
for i in range(size):
sensor.value += data[i]
print(sensor.value)
time.sleep(5)
# Monitoring process termination
if rank == 0:
p.terminate()
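# Illustrative launch sketch: rank 0 exposes the aggregated sensor value over ZeroMQ
# on the port passed as the first argument (the command line below is an assumption):
#   mpirun -n 4 python Computation.py 5555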
|
MeasurementsClient.py
|
import logging
import os
import signal
import csv
import time
import shutil
import subprocess
import datetime
import json
import sys
import schedule
import pickle
import threading
with open('config.json', 'r') as f:
config = json.load(f)
#Get the constants for the RIPE Atlas Measurements from config.json.
target = config['Target']
no_of_probes = config['NoOfProbes']
from_countries = config['From']
measure = config['Measure']
packets = config['Packets']
me = config['Me']
size = config['Size']
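# Example config.json shape for the keys read above (all values are illustrative):
# {
#   "Target": "example.com", "NoOfProbes": 10, "From": ["DE", "FR"],
#   "Measure": "ping", "Packets": 3, "Me": "my-probe", "Size": 48
# }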
# Global Constants: Configurations and folder locations
EXTRACTION_RUNNING = False
TRIMMED_LOGS = False
INIT_EXECUTION = True
latency_file = 'output/latency.pickle'
progress_file = 'output/progress.pickle'
human_readable_measurements = 'output/sintra_measurements'
iteration = 0
if not os.path.exists('output'):
os.makedirs('output')
# Output in a preferred format.
if TRIMMED_LOGS:
logging.basicConfig(filename='output/sintra.out', level=logging.INFO, format='%(message)s')
else:
logging.basicConfig(filename='output/sintra.out', level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# A dictionary of dictionaries, to store the output of measurements: [country:[src: rtt]]
whole_dict = dict()
completed_countries = list()
# All measured endpoints are saved between iterations as pickle files.
try:
with open(latency_file, 'rb') as f:
whole_dict = pickle.load(f)
INIT_EXECUTION = False
except:
logging.info("No existing pickle file. Initialized with empty value for the latency values")
try:
with open(progress_file, 'rb') as f:
completed_countries = pickle.load(f)
except:
logging.info("No existing pickle file. Initialized with empty value for completed countries")
def measure_latency():
global whole_dict
global EXTRACTION_RUNNING
global INIT_EXECUTION
global iteration
current_measurement_file = human_readable_measurements + str(iteration)
if EXTRACTION_RUNNING:
logging.info("Previous measurement still running. Skip this iteration.......................")
else:
t_start = time.time()
EXTRACTION_RUNNING = True
if INIT_EXECUTION:
# Loop through each country in the array of countries.
for country in from_countries:
logging.info('Measuring for country: ' + country)
each_dict = dict()
ripe = subprocess.run("ripe-atlas measure {0} --target {1} --probes {2} --from-country {3} --packets {4} --size {5}".format(measure, target, no_of_probes, country, packets, size), capture_output=True, shell=True, encoding="utf8")
output_str = ripe.stdout
output_line_separated = output_str.split('\n')
for line in output_line_separated:
if len(line) > 1:
entries = line.split()
each_dict[entries[5]] = entries[10][6:-1]
whole_dict[country] = each_dict
completed_countries.append(country)
# Write in a human readable file for every country's iteration.
with open(current_measurement_file, 'w') as f:
f.write(json.dumps(whole_dict))
INIT_EXECUTION = False
else:
# Update the whole_dict incrementally. But not a complete rerun.
logging.info("Todo: Subsequent Execution is not implemented yet...")
iteration += 1
# Record the total run-time
logging.info('Total run time: %s %s', (time.time() - t_start)/60, ' minutes!')
EXTRACTION_RUNNING = False
# Print the dictionary to a local file.
logging.info(whole_dict)
# Write the pickle file periodically to track the progress and persist it to the filesystem
def update_pickle():
global whole_dict
global completed_countries
with open(latency_file, 'wb') as f:
pickle.dump(whole_dict, f)
with open(progress_file, 'wb') as f:
pickle.dump(completed_countries, f)
logging.info('Progress is recorded to the pickle file')
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
# The thread scheduling
schedule.every(1).minutes.do(run_threaded, measure_latency)
schedule.every(2).minutes.do(run_threaded, update_pickle)
# Keep running in a loop.
while True:
schedule.run_pending()
time.sleep(1)
|
enlarger.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from threading import Thread
from gpiozero import OutputDevice
class Enlarger(OutputDevice):
def __init__(self, pin):
super(Enlarger, self).__init__(pin, initial_value=True)
self.printing = False
self.print_thread = None
self.timer_thread = None
self.draw = None
self.state = False
self.length = 0
self.off()
def toggle(self):
if self.printing:
return False
if self.state:
self.off()
else:
self.on()
def on(self):
self.state = True
self._write(False)
def off(self):
self.state = False
self._write(True)
def execute(self, length, draw):
if self.printing:
return False
self.printing = True
self.draw = draw
self.length = length
self.timer_thread = Thread(target=self._timer)
self.print_thread = Thread(target=self._print_off)
self.print_thread.daemon = True
self.timer_thread.daemon = True
self.print_thread.start()
self.timer_thread.start()
def _timer(self):
initial = self.length
while self.length > 0:
self.draw(self.length)
if not self.printing:
self.draw(initial)
return
time.sleep(0.2)
self.draw(initial)
def _print_off(self):
self.on()
end_time = time.time() + self.length
while self.length > 0:
if not self.printing:
return
time.sleep(0.05)
self.length -= 0.05
if time.time() >= end_time:
break
self.printing = False
self.off()
self.length = 0
def cancel(self):
self.off()
self.printing = False
self.print_thread = None
        self.timer_thread = None
self.length = 0
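# Illustrative demo (not part of the original module): expose for a fixed time
# while printing a countdown. The GPIO pin number is an assumption, and this
# only runs on a Raspberry Pi (or another board supported by gpiozero).
if __name__ == "__main__":
    def show_remaining(seconds_left):
        print("Remaining exposure: %.1f s" % seconds_left)
    enlarger = Enlarger(17)           # hypothetical BCM pin
    enlarger.execute(5, show_remaining)
    enlarger.print_thread.join()      # wait for the exposure to finish
    enlarger.timer_thread.join()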
|
rocket.py
|
# -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Modified by Massimo Di Pierro
# Import System Modules
import sys
import errno
import socket
import logging
import platform
from gluon._compat import iteritems, to_bytes, to_unicode, StringIO
from gluon._compat import urllib_unquote, to_native, PY2
# Define Constants
VERSION = '1.2.6'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (
SERVER_SOFTWARE, sys.version.split(' ')[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 10 # in secs
THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message?
if hasattr(sys, 'frozen'):
# py2installer
IS_JYTHON = False
else:
IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(LISTEN_QUEUE_SIZE=DEFAULT_LISTEN_QUEUE_SIZE,
MIN_THREADS=DEFAULT_MIN_THREADS,
MAX_THREADS=DEFAULT_MAX_THREADS)
PY3K = not PY2
class NullHandler(logging.Handler):
"""A Logging handler to prevent library errors."""
def emit(self, record):
pass
b = to_bytes
u = to_unicode
# Import Package Modules
# package imports removed in monolithic build
__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']
# Monolithic build...end of module: rocket/__init__.py
# Monolithic build...start of module: rocket/connection.py
# Import System Modules
import sys
import time
import socket
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
# Import Package Modules
# package imports removed in monolithic build
# TODO - This part is still very experimental.
# from .filelike import FileLikeSocket
class Connection(object):
__slots__ = [
'setblocking',
'sendall',
'shutdown',
'makefile',
'fileno',
'client_addr',
'client_port',
'server_port',
'socket',
'start_time',
'ssl',
'secure',
'recv',
'send',
'read',
'write'
]
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1][:2]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
if sys.platform == 'darwin':
self.sendall = self._sendall_darwin
else:
self.sendall = self.socket.sendall
def _sendall_darwin(self, buf):
pending = len(buf)
offset = 0
while pending:
try:
sent = self.socket.send(buf[offset:])
pending -= sent
offset += sent
except socket.error:
import errno
info = sys.exc_info()
if info[1].args[0] != errno.EAGAIN:
raise
return offset
# FIXME - this is not ready for prime-time yet.
# def makefile(self, buf_size=BUF_SIZE):
# return FileLikeSocket(self, buf_size)
def close(self):
if hasattr(self.socket, '_sock'):
try:
self.socket._sock.close()
except socket.error:
info = sys.exc_info()
                if info[1].args[0] != errno.EBADF:
raise info[1]
else:
pass
self.socket.close()
# Monolithic build...end of module: rocket/connection.py
# Monolithic build...start of module: rocket/filelike.py
# Import System Modules
import socket
# Import Package Modules
# package imports removed in monolithic build
class FileLikeSocket(object):
def __init__(self, conn, buf_size=BUF_SIZE):
self.conn = conn
self.buf_size = buf_size
self.buffer = StringIO()
self.content_length = None
if self.conn.socket.gettimeout() == 0.0:
self.read = self.non_blocking_read
else:
self.read = self.blocking_read
def __iter__(self):
return self
def recv(self, size):
while True:
try:
return self.conn.recv(size)
except socket.error:
exc = sys.exc_info()
e = exc[1]
# FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr
if (e.args[0] not in set()):
raise
def next(self):
data = self.readline()
if data == '':
raise StopIteration
return data
def non_blocking_read(self, size=None):
# Shamelessly adapted from Cherrypy!
bufr = self.buffer
bufr.seek(0, 2)
if size is None:
while True:
data = self.recv(self.buf_size)
if not data:
break
bufr.write(data)
self.buffer = StringIO()
return bufr.getvalue()
else:
buf_len = self.buffer.tell()
if buf_len >= size:
bufr.seek(0)
data = bufr.read(size)
self.buffer = StringIO(bufr.read())
return data
self.buffer = StringIO()
while True:
remaining = size - buf_len
data = self.recv(remaining)
if not data:
break
n = len(data)
if n == size and not buf_len:
return data
if n == remaining:
bufr.write(data)
del data
break
bufr.write(data)
buf_len += n
del data
return bufr.getvalue()
def blocking_read(self, length=None):
if length is None:
if self.content_length is not None:
length = self.content_length
else:
length = 1
try:
data = self.conn.recv(length)
except:
data = b('')
return data
def readline(self):
data = b("")
char = self.read(1)
        while char != b('\n') and char != b(''):
            data += char
            char = self.read(1)
data += char
return data
def readlines(self, hint="ignored"):
return list(self)
def close(self):
self.conn = None
self.content_length = None
# Monolithic build...end of module: rocket/filelike.py
# Monolithic build...start of module: rocket/futures.py
# Import System Modules
import time
try:
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent.futures.thread import _WorkItem
has_futures = True
except ImportError:
has_futures = False
class Future(object):
pass
class ThreadPoolExecutor(object):
pass
class _WorkItem(object):
pass
class WSGIFuture(Future):
def __init__(self, f_dict, *args, **kwargs):
Future.__init__(self, *args, **kwargs)
self.timeout = None
self._mem_dict = f_dict
self._lifespan = 30
self._name = None
self._start_time = time.time()
def set_running_or_notify_cancel(self):
if time.time() - self._start_time >= self._lifespan:
self.cancel()
else:
return super(WSGIFuture, self).set_running_or_notify_cancel()
def remember(self, name, lifespan=None):
self._lifespan = lifespan or self._lifespan
if name in self._mem_dict:
raise NameError('Cannot remember future by name "%s". ' % name +
'A future already exists with that name.')
self._name = name
self._mem_dict[name] = self
return self
def forget(self):
if self._name in self._mem_dict and self._mem_dict[self._name] is self:
del self._mem_dict[self._name]
self._name = None
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
multithread = True
multiprocess = False
def __init__(self, *args, **kwargs):
ThreadPoolExecutor.__init__(self, *args, **kwargs)
self.futures = dict()
def submit(self, fn, *args, **kwargs):
if self._shutdown_lock.acquire():
if self._shutdown:
self._shutdown_lock.release()
raise RuntimeError(
'Cannot schedule new futures after shutdown')
f = WSGIFuture(self.futures)
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._shutdown_lock.release()
return f
else:
return False
class FuturesMiddleware(object):
"""Futures middleware that adds a Futures Executor to the environment"""
def __init__(self, app, threads=5):
self.app = app
self.executor = WSGIExecutor(threads)
def __call__(self, environ, start_response):
environ["wsgiorg.executor"] = self.executor
environ["wsgiorg.futures"] = self.executor.futures
return self.app(environ, start_response)
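# Illustrative usage (not part of the original module): wrap an existing WSGI
# callable so request handlers can submit background work. `my_wsgi_app` is a
# placeholder name, and concurrent.futures must be importable for this to work.
#
#     app = FuturesMiddleware(my_wsgi_app, threads=5)
#     # inside a request handler:
#     #     future = environ['wsgiorg.executor'].submit(slow_job, arg)
#     #     future.remember('slow-job')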
# Monolithic build...end of module: rocket/futures.py
# Monolithic build...start of module: rocket/listener.py
# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
import ssl
from ssl import SSLError
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
class Listener(Thread):
"""The Listener class is a class responsible for accepting connections
and queuing them to be processed by a worker thread."""
def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance variables
self.active_queue = active_queue
self.interface = interface
self.addr = interface[0]
self.port = interface[1]
self.secure = len(interface) >= 4
self.clientcert_req = (len(interface) == 5 and interface[4])
self.thread = None
self.ready = False
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
self.err_log.addHandler(NullHandler())
# Build the socket
if ':' in self.addr:
listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not listener:
self.err_log.error("Failed to get socket.")
return
if self.secure:
if not has_ssl:
self.err_log.error("ssl module required to serve HTTPS.")
return
elif not os.path.exists(interface[2]):
data = (interface[2], interface[0], interface[1])
self.err_log.error("Cannot find key file "
"'%s'. Cannot bind to %s:%s" % data)
return
elif not os.path.exists(interface[3]):
data = (interface[3], interface[0], interface[1])
self.err_log.error("Cannot find certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
if self.clientcert_req and not os.path.exists(interface[4]):
data = (interface[4], interface[0], interface[1])
self.err_log.error("Cannot find root ca certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
# Set socket options
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
msg = "Cannot share socket. Using %s:%i exclusively."
self.err_log.warning(msg % (self.addr, self.port))
try:
if not IS_JYTHON:
listener.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY,
1)
except:
msg = "Cannot set TCP_NODELAY, things might run a little slower"
self.err_log.warning(msg)
try:
listener.bind((self.addr, self.port))
except:
msg = "Socket %s:%i in use by other process and it won't share."
self.err_log.error(msg % (self.addr, self.port))
else:
# We want socket operations to timeout periodically so we can
# check if the server is shutting down
listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
# Listen for new connections allowing queue_size number of
# connections to wait before rejecting a connection.
listener.listen(queue_size)
self.listener = listener
self.ready = True
def wrap_socket(self, sock):
try:
if self.clientcert_req:
ca_certs = self.interface[4]
cert_reqs = ssl.CERT_OPTIONAL
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
ssl_version=ssl.PROTOCOL_SSLv23)
else:
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
ssl_version=ssl.PROTOCOL_SSLv23)
except SSLError:
# Generally this happens when an HTTP request is received on a
# secure socket. We don't do anything because it will be detected
# by Worker and dealt with appropriately.
pass
return sock
def start(self):
if not self.ready:
self.err_log.warning('Listener started when not ready.')
return
if self.thread is not None and self.thread.isAlive():
self.err_log.warning('Listener already running.')
return
self.thread = Thread(target=self.listen, name="Port" + str(self.port))
self.thread.start()
def isAlive(self):
if self.thread is None:
return False
return self.thread.isAlive()
def join(self):
if self.thread is None:
return
self.ready = False
self.thread.join()
del self.thread
self.thread = None
self.ready = True
def listen(self):
if __debug__:
self.err_log.debug('Entering main loop.')
while True:
try:
sock, addr = self.listener.accept()
if self.secure:
sock = self.wrap_socket(sock)
self.active_queue.put(((sock, addr),
self.interface[1],
self.secure))
except socket.timeout:
# socket.timeout will be raised every
# THREAD_STOP_CHECK_INTERVAL seconds. When that happens,
# we check if it's time to die.
if not self.ready:
if __debug__:
self.err_log.debug('Listener exiting.')
return
else:
continue
except:
self.err_log.error(traceback.format_exc())
# Monolithic build...end of module: rocket/listener.py
# Monolithic build...start of module: rocket/main.py
# Import System Modules
import sys
import time
import socket
import logging
import traceback
from threading import Lock
if PY3K:
from queue import Queue
else:
from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())
class Rocket(object):
"""The Rocket class is responsible for handling threads and accepting and
dispatching connections."""
def __init__(self,
interfaces=('127.0.0.1', 8000),
method='wsgi',
app_info=None,
min_threads=None,
max_threads=None,
queue_size=None,
timeout=600,
handle_signals=True):
self.handle_signals = handle_signals
self.startstop_lock = Lock()
self.timeout = timeout
if not isinstance(interfaces, list):
self.interfaces = [interfaces]
else:
self.interfaces = interfaces
if min_threads is None:
min_threads = DEFAULTS['MIN_THREADS']
if max_threads is None:
max_threads = DEFAULTS['MAX_THREADS']
if not queue_size:
if hasattr(socket, 'SOMAXCONN'):
queue_size = socket.SOMAXCONN
else:
queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']
if max_threads and queue_size > max_threads:
queue_size = max_threads
if isinstance(app_info, dict):
app_info['server_software'] = SERVER_SOFTWARE
self.monitor_queue = Queue()
self.active_queue = Queue()
self._threadpool = ThreadPool(get_method(method),
app_info=app_info,
active_queue=self.active_queue,
monitor_queue=self.monitor_queue,
min_threads=min_threads,
max_threads=max_threads)
# Build our socket listeners
self.listeners = [Listener(
i, queue_size, self.active_queue) for i in self.interfaces]
for ndx in range(len(self.listeners) - 1, 0, -1):
if not self.listeners[ndx].ready:
del self.listeners[ndx]
if not self.listeners:
log.critical("No interfaces to listen on...closing.")
sys.exit(1)
def _sigterm(self, signum, frame):
log.info('Received SIGTERM')
self.stop()
def _sighup(self, signum, frame):
log.info('Received SIGHUP')
self.restart()
def start(self, background=False):
log.info('Starting %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Set up our shutdown signals
if self.handle_signals:
try:
import signal
signal.signal(signal.SIGTERM, self._sigterm)
signal.signal(signal.SIGUSR1, self._sighup)
except:
log.debug('This platform does not support signals.')
# Start our worker threads
self._threadpool.start()
# Start our monitor thread
self._monitor = Monitor(self.monitor_queue,
self.active_queue,
self.timeout,
self._threadpool)
self._monitor.setDaemon(True)
self._monitor.start()
# I know that EXPR and A or B is bad but I'm keeping it for Py2.4
# compatibility.
str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
msg = 'Listening on sockets: '
msg += ', '.join(
['%s:%i%s' % str_extract(l) for l in self.listeners])
log.info(msg)
for l in self.listeners:
l.start()
finally:
self.startstop_lock.release()
if background:
return
while self._monitor.isAlive():
try:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
except KeyboardInterrupt:
# Capture a keyboard interrupt when running from a console
break
except:
if self._monitor.isAlive():
log.error(traceback.format_exc())
continue
return self.stop()
def stop(self, stoplogging=False):
log.info('Stopping %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
if l.isAlive():
l.join()
# Stop Monitor
self._monitor.stop()
if self._monitor.isAlive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
                try:
                    import warnings
                    raise DeprecationWarning(msg)
                except ImportError:
                    raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def restart(self):
self.stop()
self.start()
def CherryPyWSGIServer(bind_addr,
wsgi_app,
numthreads=10,
server_name=None,
max=-1,
request_queue_size=5,
timeout=10,
shutdown_timeout=5):
""" A Cherrypy wsgiserver-compatible wrapper. """
max_threads = max
if max_threads < 0:
max_threads = 0
return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
min_threads=numthreads,
max_threads=max_threads,
queue_size=request_queue_size,
timeout=timeout)
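# Illustrative usage (not part of the original module): the wrapper keeps the
# cherrypy.wsgiserver calling convention, so existing callers can switch to
# Rocket with minimal changes. `my_wsgi_app` is a placeholder WSGI callable.
#
#     server = CherryPyWSGIServer(('127.0.0.1', 8080), my_wsgi_app, numthreads=10)
#     server.start()   # blocks until interrupted or server.stop() is called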
# Monolithic build...end of module: rocket/main.py
# Monolithic build...start of module: rocket/monitor.py
# Import System Modules
import time
import logging
import select
from threading import Thread
# Import Package Modules
# package imports removed in monolithic build
class Monitor(Thread):
    # The Monitor thread holds idle keep-alive connections, requeues the ones
    # that become readable, and closes those that exceed the timeout.
def __init__(self,
monitor_queue,
active_queue,
timeout,
threadpool,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
self._threadpool = threadpool
# Instance Variables
self.monitor_queue = monitor_queue
self.active_queue = active_queue
self.timeout = timeout
self.log = logging.getLogger('Rocket.Monitor')
self.log.addHandler(NullHandler())
self.connections = set()
self.active = False
def isAlive(self):
if self.active is None:
return False
return True
def run(self):
self.active = True
conn_list = list()
list_changed = False
# We need to make sure the queue is empty before we start
while not self.monitor_queue.empty():
self.monitor_queue.get()
if __debug__:
self.log.debug('Entering monitor loop.')
# Enter thread main loop
while self.active:
# Move the queued connections to the selection pool
while not self.monitor_queue.empty():
if __debug__:
self.log.debug('In "receive timed-out connections" loop.')
c = self.monitor_queue.get()
if c is None:
# A non-client is a signal to die
if __debug__:
self.log.debug('Received a death threat.')
self.stop()
break
self.log.debug('Received a timed out connection.')
if __debug__:
assert(c not in self.connections)
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it.
c.setblocking(False)
if __debug__:
self.log.debug('Adding connection to monitor list.')
self.connections.add(c)
list_changed = True
# Wait on those connections
if list_changed:
conn_list = list(self.connections)
list_changed = False
try:
if len(conn_list):
readable = select.select(conn_list,
[],
[],
THREAD_STOP_CHECK_INTERVAL)[0]
else:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
readable = []
if not self.active:
break
# If we have any readable connections, put them back
for r in readable:
if __debug__:
self.log.debug('Restoring readable connection')
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it, but the rest of the code requires
# that it be in blocking mode.
r.setblocking(True)
r.start_time = time.time()
self.active_queue.put(r)
self.connections.remove(r)
list_changed = True
except:
if self.active:
raise
else:
break
# If we have any stale connections, kill them off.
if self.timeout:
now = time.time()
stale = set()
for c in self.connections:
if (now - c.start_time) >= self.timeout:
stale.add(c)
for c in stale:
if __debug__:
# "EXPR and A or B" kept for Py2.4 compatibility
data = (
c.client_addr, c.server_port, c.ssl and '*' or '')
self.log.debug(
'Flushing stale connection: %s:%i%s' % data)
self.connections.remove(c)
list_changed = True
try:
c.close()
finally:
del c
# Dynamically resize the threadpool to adapt to our changing needs.
self._threadpool.dynamic_resize()
def stop(self):
self.active = False
if __debug__:
self.log.debug('Flushing waiting connections')
while self.connections:
c = self.connections.pop()
try:
c.close()
finally:
del c
if __debug__:
self.log.debug('Flushing queued connections')
while not self.monitor_queue.empty():
c = self.monitor_queue.get()
if c is None:
continue
try:
c.close()
finally:
del c
# Place a None sentry value to cause the monitor to die.
self.monitor_queue.put(None)
# Monolithic build...end of module: rocket/monitor.py
# Monolithic build...start of module: rocket/threadpool.py
# Import System Modules
import logging
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket.Errors.ThreadPool')
log.addHandler(NullHandler())
class ThreadPool:
"""The ThreadPool class is a container class for all the worker threads. It
manages the number of actively running threads."""
def __init__(self,
method,
app_info,
active_queue,
monitor_queue,
min_threads=DEFAULTS['MIN_THREADS'],
max_threads=DEFAULTS['MAX_THREADS'],
):
if __debug__:
log.debug("Initializing ThreadPool.")
self.check_for_dead_threads = 0
self.active_queue = active_queue
self.worker_class = method
self.min_threads = min_threads
self.max_threads = max_threads
self.monitor_queue = monitor_queue
self.stop_server = False
self.alive = False
# TODO - Optimize this based on some real-world usage data
self.grow_threshold = int(max_threads / 10) + 2
if not isinstance(app_info, dict):
app_info = dict()
if has_futures and app_info.get('futures'):
app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'],
2]))
app_info.update(max_threads=max_threads,
min_threads=min_threads)
self.min_threads = min_threads
self.app_info = app_info
self.threads = set()
def start(self):
self.stop_server = False
if __debug__:
log.debug("Starting threads.")
self.grow(self.min_threads)
self.alive = True
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if has_futures and self.app_info.get('futures'):
if __debug__:
log.debug("Future executor is present. Python will not "
"exit until all jobs have finished.")
self.app_info['executor'].shutdown(wait=False)
# Give them the gun
# active_threads = [t for t in self.threads if t.isAlive()]
# while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
if t.isAlive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
def bring_out_your_dead(self):
# Remove dead threads from the pool
dead_threads = [t for t in self.threads if not t.isAlive()]
for t in dead_threads:
if __debug__:
log.debug("Removing dead thread: %s." % t.getName())
try:
# Py2.4 complains here so we put it in a try block
self.threads.remove(t)
except:
pass
self.check_for_dead_threads -= len(dead_threads)
def grow(self, amount=None):
if self.stop_server:
return
if not amount:
amount = self.max_threads
if self.alive:
amount = min([amount, self.max_threads - len(self.threads)])
if __debug__:
log.debug("Growing by %i." % amount)
for x in range(amount):
worker = self.worker_class(self.app_info,
self.active_queue,
self.monitor_queue)
worker.setDaemon(True)
self.threads.add(worker)
worker.start()
def shrink(self, amount=1):
if __debug__:
log.debug("Shrinking by %i." % amount)
self.check_for_dead_threads += amount
for x in range(amount):
self.active_queue.put(None)
def dynamic_resize(self):
if (self.max_threads > self.min_threads or self.max_threads == 0):
if self.check_for_dead_threads > 0:
self.bring_out_your_dead()
queueSize = self.active_queue.qsize()
threadCount = len(self.threads)
if __debug__:
log.debug("Examining ThreadPool. %i threads and %i Q'd conxions"
% (threadCount, queueSize))
if queueSize == 0 and threadCount > self.min_threads:
self.shrink()
elif queueSize > self.grow_threshold:
self.grow(queueSize)
# Monolithic build...end of module: rocket/threadpool.py
# Monolithic build...start of module: rocket/worker.py
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
re_SLASH = re.compile('%2F', re.IGNORECASE)
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|PATCH|TRACE|CONNECT) # Req Method
\ # single space
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]*))? # Query String
\ # single space
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
RESPONSE = '''\
%s %s
Content-Length: %i
Content-Type: %s

%s
'''
if IS_JYTHON:
HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
"""The Worker class is a base class responsible for receiving connections
and (a subclass) will run an application to process the the connection """
def __init__(self,
app_info,
active_queue,
monitor_queue,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance Variables
self.app_info = app_info
self.active_queue = active_queue
self.monitor_queue = monitor_queue
self.size = 0
self.status = "200 OK"
self.closeConnection = True
self.request_line = ""
self.protocol = 'HTTP/1.1'
# Request Log
self.req_log = logging.getLogger('Rocket.Requests')
self.req_log.addHandler(NullHandler())
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.' + self.getName())
self.err_log.addHandler(NullHandler())
def _handleError(self, typ, val, tb):
if typ == SSLError:
if 'timed out' in str(val.args[0]):
typ = SocketTimeout
if typ == SocketTimeout:
if __debug__:
self.err_log.debug('Socket timed out')
self.monitor_queue.put(self.conn)
return True
if typ == SocketClosed:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client closed socket')
return False
if typ == BadRequest:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client sent a bad request')
return True
if typ == socket.error:
self.closeConnection = True
if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
if __debug__:
self.err_log.debug('Ignorable socket Error received...'
'closing connection.')
return False
else:
self.status = "999 Utter Server Failure"
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('Unhandled Error when serving '
'connection:\n' + '\n'.join(tb_fmt))
return False
self.closeConnection = True
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('\n'.join(tb_fmt))
self.send_response('500 Server Error')
return False
def run(self):
if __debug__:
self.err_log.debug('Entering main loop.')
# Enter thread main loop
while True:
conn = self.active_queue.get()
if not conn:
# A non-client is a signal to die
if __debug__:
self.err_log.debug('Received a death threat.')
return conn
if isinstance(conn, tuple):
conn = Connection(*conn)
self.conn = conn
if conn.ssl != conn.secure:
self.err_log.info('Received HTTP connection on HTTPS port.')
self.send_response('400 Bad Request')
self.closeConnection = True
conn.close()
continue
else:
if __debug__:
self.err_log.debug('Received a connection.')
self.closeConnection = False
# Enter connection serve loop
while True:
if __debug__:
self.err_log.debug('Serving a request')
try:
self.run_app(conn)
except:
exc = sys.exc_info()
handled = self._handleError(*exc)
if handled:
break
finally:
if self.request_line:
log_info = dict(client_ip=conn.client_addr,
time=datetime.now().strftime('%c'),
status=self.status.split(' ')[0],
size=self.size,
request_line=self.request_line)
self.req_log.info(LOG_LINE % log_info)
if self.closeConnection:
try:
conn.close()
except:
self.err_log.error(str(traceback.format_exc()))
break
def run_app(self, conn):
        # Must be overridden with a method that reads the request from the
        # socket and sends a response.
self.closeConnection = True
raise NotImplementedError('Overload this method!')
def send_response(self, status):
stat_msg = status.split(' ', 1)[1]
msg = RESPONSE % (self.protocol,
status,
len(stat_msg),
'text/plain',
stat_msg)
try:
self.conn.sendall(b(msg))
except socket.timeout:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received timeout error'
self.err_log.error(msg % status)
except socket.error:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received socket error'
self.err_log.error(msg % status)
def read_request_line(self, sock_file):
self.request_line = ''
try:
# Grab the request line
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
if d == '\r\n':
# Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
if __debug__:
self.err_log.debug('Client sent newline')
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
except socket.timeout:
raise SocketTimeout('Socket timed out before request.')
except TypeError:
raise SocketClosed(
'SSL bug caused closure of socket. See '
'"https://groups.google.com/d/topic/web2py/P_Gw0JxWzCs".')
d = d.strip()
if not d:
if __debug__:
self.err_log.debug(
'Client did not send a recognizable request.')
raise SocketClosed('Client closed socket.')
self.request_line = d
# NOTE: I've replaced the traditional method of procedurally breaking
# apart the request line with a (rather unsightly) regular expression.
# However, Java's regexp support sucks so bad that it actually takes
# longer in Jython to process the regexp than procedurally. So I've
# left the old code here for Jython's sake...for now.
if IS_JYTHON:
return self._read_request_line_jython(d)
match = re_REQUEST_LINE.match(d)
if not match:
self.send_response('400 Bad Request')
raise BadRequest
req = match.groupdict()
for k, v in iteritems(req):
if not v:
req[k] = ""
if k == 'path':
req['path'] = r'%2F'.join(
[urllib_unquote(x) for x in re_SLASH.split(v)])
self.protocol = req['protocol']
return req
def _read_request_line_jython(self, d):
d = d.strip()
try:
method, uri, proto = d.split(' ')
if not proto.startswith('HTTP') or \
proto[-3:] not in ('1.0', '1.1') or \
method not in HTTP_METHODS:
self.send_response('400 Bad Request')
raise BadRequest
except ValueError:
self.send_response('400 Bad Request')
raise BadRequest
req = dict(method=method, protocol=proto)
scheme = ''
host = ''
if uri == '*' or uri.startswith('/'):
path = uri
elif '://' in uri:
scheme, rest = uri.split('://')
host, path = rest.split('/', 1)
path = '/' + path
else:
self.send_response('400 Bad Request')
raise BadRequest
query_string = ''
if '?' in path:
path, query_string = path.split('?', 1)
path = r'%2F'.join([urllib_unquote(x) for x in re_SLASH.split(path)])
req.update(path=path,
query_string=query_string,
scheme=scheme.lower(),
host=host)
return req
def read_headers(self, sock_file):
try:
headers = dict()
lname = None
lval = None
while True:
l = sock_file.readline()
if PY3K:
try:
l = str(l, 'ISO-8859-1')
except UnicodeDecodeError:
self.err_log.warning(
'Client sent invalid header: ' + repr(l))
if l.strip().replace('\0', '') == '':
break
if l[0] in ' \t' and lname:
# Some headers take more than one line
lval += ' ' + l.strip()
else:
# HTTP header values are latin-1 encoded
l = l.split(':', 1)
# HTTP header names are us-ascii encoded
lname = l[0].strip().upper().replace('-', '_')
lval = l[-1].strip()
headers[str(lname)] = str(lval)
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
return headers
class SocketTimeout(Exception):
"""Exception for when a socket times out between requests."""
pass
class BadRequest(Exception):
"""Exception for when a client sends an incomprehensible request."""
pass
class SocketClosed(Exception):
"""Exception for when a socket is closed by the client."""
pass
class ChunkedReader(object):
def __init__(self, sock_file):
self.stream = sock_file
self.chunk_size = 0
def _read_header(self):
chunk_len = ""
try:
while "" == chunk_len:
chunk_len = self.stream.readline().strip()
return int(chunk_len, 16)
except ValueError:
return 0
def read(self, size):
data = b('')
chunk_size = self.chunk_size
while size:
if not chunk_size:
chunk_size = self._read_header()
if size < chunk_size:
data += self.stream.read(size)
chunk_size -= size
break
else:
if not chunk_size:
break
data += self.stream.read(chunk_size)
size -= chunk_size
chunk_size = 0
self.chunk_size = chunk_size
return data
def readline(self):
data = b('')
c = self.read(1)
while c and c != b('\n'):
data += c
c = self.read(1)
data += c
return data
def readlines(self):
yield self.readline()
def get_method(method):
methods = dict(wsgi=WSGIWorker)
return methods[method.lower()]
# Monolithic build...end of module: rocket/worker.py
# Monolithic build...start of module: rocket/methods/__init__.py
# Monolithic build...end of module: rocket/methods/__init__.py
# Monolithic build...start of module: rocket/methods/wsgi.py
# Import System Modules
import sys
import socket
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
if PY3K:
from email.utils import formatdate
else:
# Caps Utils for Py2.4 compatibility
from email.Utils import formatdate
# Define Constants
NEWLINE = b('\r\n')
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
'SCRIPT_NAME': '', # Direct call WSGI does not need a name
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.file_wrapper': FileWrapper
}
class WSGIWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
if isinstance(self.app_info, dict):
multithreaded = self.app_info.get('max_threads') != 1
else:
multithreaded = False
self.base_environ = dict(
{'SERVER_SOFTWARE': self.app_info['server_software'],
'wsgi.multithread': multithreaded,
})
self.base_environ.update(BASE_ENV)
# Grab our application
self.app = self.app_info.get('wsgi_app')
if not hasattr(self.app, "__call__"):
raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))
# Enable futures
if has_futures and self.app_info.get('futures'):
executor = self.app_info['executor']
self.base_environ.update({"wsgiorg.executor": executor,
"wsgiorg.futures": executor.futures})
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in iteritems(self.read_headers(sock_file)):
environ[str('HTTP_' + k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
try:
peercert = conn.socket.getpeercert(binary_form=True)
environ['SSL_CLIENT_RAW_CERT'] = \
peercert and to_native(ssl.DER_cert_to_PEM_cert(peercert))
except Exception:
print(sys.exc_info()[1])
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ
def send_headers(self, data, sections):
h_set = self.header_set
# Does the app want us to send output chunked?
self.chunked = h_set.get('Transfer-Encoding', '').lower() == 'chunked'
# Add a Date header if it's not there already
if not 'Date' in h_set:
h_set['Date'] = formatdate(usegmt=True)
# Add a Server header if it's not there already
if not 'Server' in h_set:
h_set['Server'] = HTTP_SERVER_SOFTWARE
if 'Content-Length' in h_set:
self.size = int(h_set['Content-Length'])
else:
s = int(self.status.split(' ')[0])
if (s < 200 or s not in (204, 205, 304)) and not self.chunked:
if sections == 1 or self.protocol != 'HTTP/1.1':
# Add a Content-Length header because it's not there
self.size = len(data)
h_set['Content-Length'] = str(self.size)
else:
# If they sent us more than one section, we blow chunks
h_set['Transfer-Encoding'] = 'Chunked'
self.chunked = True
if __debug__:
self.err_log.debug('Adding header...'
'Transfer-Encoding: Chunked')
if 'Connection' not in h_set:
# If the application did not provide a connection header,
# fill it in
client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
                # HTTP 1.1 defaults to keep-alive connections
if client_conn:
h_set['Connection'] = client_conn
else:
h_set['Connection'] = 'keep-alive'
else:
# HTTP < 1.1 supports keep-alive but it's quirky
# so we don't support it
h_set['Connection'] = 'close'
# Close our connection if we need to.
self.closeConnection = h_set.get('Connection', '').lower() == 'close'
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h_set))
# Send the headers
if __debug__:
self.err_log.debug('Sending Headers: %s' % repr(header_data))
self.conn.sendall(b(header_data))
self.headers_sent = True
def write_warning(self, data, sections=None):
self.err_log.warning('WSGI app called write method directly. This is '
'deprecated behavior. Please update your app.')
return self.write(data, sections)
def write(self, data, sections=None):
""" Write the data to the output socket. """
if self.error[0]:
self.status = self.error[0]
data = b(self.error[1])
if not self.headers_sent:
self.send_headers(data, sections)
if self.request_method != 'HEAD':
try:
if self.chunked:
self.conn.sendall(b'%x\r\n%s\r\n' % (len(data), to_bytes(data, 'ISO-8859-1')))
else:
self.conn.sendall(to_bytes(data))
except socket.timeout:
self.closeConnection = True
except socket.error:
# But some clients will close the connection before that
# resulting in a socket error.
self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
""" Store the HTTP status and headers to be sent when self.write is
called. """
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
# because this violates WSGI specification.
raise
finally:
exc_info = None
elif self.header_set:
raise AssertionError("Headers already set!")
if PY3K and not isinstance(status, str):
self.status = str(status, 'ISO-8859-1')
else:
self.status = status
# Make sure headers are bytes objects
try:
self.header_set = Headers(response_headers)
except UnicodeDecodeError:
self.error = ('500 Internal Server Error',
'HTTP Headers should be bytes')
self.err_log.error('Received HTTP Headers from client that contain'
' invalid characters for Latin-1 encoding.')
return self.write_warning
def run_app(self, conn):
self.size = 0
self.header_set = Headers([])
self.headers_sent = False
self.error = (None, None)
self.chunked = False
sections = None
output = None
if __debug__:
self.err_log.debug('Getting sock_file')
# Build our file-like object
if PY3K:
sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE)
else:
sock_file = conn.makefile(BUF_SIZE)
try:
# Read the headers and build our WSGI environment
self.environ = environ = self.build_environ(sock_file, conn)
# Handle 100 Continue
if environ.get('HTTP_EXPECT', '') == '100-continue':
res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n'
conn.sendall(b(res))
# Send it to our WSGI application
output = self.app(environ, self.start_response)
if not hasattr(output, '__len__') and not hasattr(output, '__iter__'):
self.error = ('500 Internal Server Error',
'WSGI applications must return a list or '
'generator type.')
if hasattr(output, '__len__'):
sections = len(output)
for data in output:
# Don't send headers until body appears
if data:
self.write(data, sections)
if not self.headers_sent:
# Send headers if the body was empty
self.send_headers('', sections)
if self.chunked and self.request_method != 'HEAD':
# If chunked, send our final chunk length
self.conn.sendall(b('0\r\n\r\n'))
# Don't capture exceptions here. The Worker class handles
# them appropriately.
finally:
if __debug__:
self.err_log.debug('Finally closing output and sock_file')
if hasattr(output, 'close'):
output.close()
sock_file.close()
# Monolithic build...end of module: rocket/methods/wsgi.py
def demo_app(environ, start_response):
global static_folder
import os
types = {'htm': 'text/html','html': 'text/html','gif': 'image/gif',
             'jpg': 'image/jpeg','png': 'image/png','pdf': 'application/pdf'}
if static_folder:
if not static_folder.startswith('/'):
static_folder = os.path.join(os.getcwd(),static_folder)
path = os.path.join(static_folder, environ['PATH_INFO'][1:] or 'index.html')
        type = types.get(path.split('.')[-1], 'text/plain')
if os.path.exists(path):
try:
data = open(path,'rb').read()
start_response('200 OK', [('Content-Type', type)])
except IOError:
start_response('404 NOT FOUND', [])
data = '404 NOT FOUND'
else:
start_response('500 INTERNAL SERVER ERROR', [])
data = '500 INTERNAL SERVER ERROR'
else:
start_response('200 OK', [('Content-Type', 'text/html')])
data = '<html><body><h1>Hello from Rocket Web Server</h1></body></html>'
return [data]
def demo():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-i", "--ip", dest="ip",default="127.0.0.1",
help="ip address of the network interface")
parser.add_option("-p", "--port", dest="port",default="8000",
help="post where to run web server")
parser.add_option("-s", "--static", dest="static",default=None,
help="folder containing static files")
(options, args) = parser.parse_args()
global static_folder
static_folder = options.static
print('Rocket running on %s:%s' % (options.ip, options.port))
r=Rocket((options.ip,int(options.port)),'wsgi', {'wsgi_app':demo_app})
r.start()
if __name__=='__main__':
demo()
|
red_test.py
|
#!/usr/bin/env python
import logging
from redbot.resource import HttpResource
import redbot.speak as rs
import thor
import threading
from tornado import gen
from tornado.options import parse_command_line
from tornado.testing import AsyncHTTPTestCase, LogTrapTestCase
from tornado.web import RequestHandler, Application, asynchronous
import unittest
class HelloHandler(RequestHandler):
def get(self):
self.write("Hello world")
class RedirectHandler(RequestHandler):
def get(self, path):
self.redirect(path, status=int(self.get_argument('status', '302')))
class PostHandler(RequestHandler):
def post(self):
assert self.get_argument('foo') == 'bar'
self.redirect('/hello', status=303)
class ChunkedHandler(RequestHandler):
@asynchronous
@gen.engine
def get(self):
self.write('hello ')
yield gen.Task(self.flush)
self.write('world')
yield gen.Task(self.flush)
self.finish()
class TestMixin(object):
def get_handlers(self):
return [
('/hello', HelloHandler),
('/redirect(/.*)', RedirectHandler),
('/post', PostHandler),
('/chunked', ChunkedHandler),
]
def get_app_kwargs(self):
return dict(static_path='.')
def get_allowed_warnings(self):
return [
# We can't set a non-heuristic freshness at the framework level,
# so just ignore this warning
rs.FRESHNESS_HEURISTIC,
# For our small test responses the Content-Encoding header
# wipes out any gains from compression
rs.CONNEG_GZIP_BAD,
]
def get_allowed_errors(self):
return []
def check_url(self, path, method='GET', body=None, headers=None,
expected_status=200, allowed_warnings=None,
allowed_errors=None):
url = self.get_url(path)
red = self.run_redbot(url, method, body, headers)
if not red.response.complete:
if isinstance(red.response.http_error, Exception):
logging.warning((red.response.http_error.desc, vars(red.response.http_error), url))
raise red.response.http_error.res_error
else:
raise Exception("unknown error; incomplete response")
self.assertEqual(int(red.response.status_code), expected_status)
allowed_warnings = (allowed_warnings or []) + self.get_allowed_warnings()
allowed_errors = (allowed_errors or []) + self.get_allowed_errors()
errors = []
warnings = []
for msg in red.response.notes:
if msg.level == 'bad':
logger = logging.error
if not isinstance(msg, tuple(allowed_errors)):
errors.append(msg)
elif msg.level == 'warning':
logger = logging.warning
if not isinstance(msg, tuple(allowed_warnings)):
warnings.append(msg)
elif msg.level in ('good', 'info', 'uri'):
logger = logging.info
else:
raise Exception('unknown level' + msg.level)
logger('%s: %s (%s)', msg.category, msg.show_summary('en'),
msg.__class__.__name__)
logger(msg.show_text('en'))
self.assertEqual(len(warnings) + len(errors), 0,
'Had %d unexpected warnings and %d errors' %
(len(warnings), len(errors)))
def run_redbot(self, url, method, body, headers):
red = HttpResource(url, method=method, req_body=body,
req_hdrs=headers)
def work():
red.run(thor.stop)
thor.run()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=work)
thread.start()
self.wait()
thread.join()
return red
def test_hello(self):
self.check_url('/hello')
def test_static(self):
# TODO: 304 responses SHOULD return the same etag that a full
# response would. We currently do for If-None-Match, but not
# for If-Modified-Since (because IMS does not otherwise
# require us to read the file from disk)
self.check_url('/static/red_test.py',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_static_versioned_url(self):
self.check_url('/static/red_test.py?v=1234',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_redirect(self):
self.check_url('/redirect/hello', expected_status=302)
def test_permanent_redirect(self):
self.check_url('/redirect/hello?status=301', expected_status=301)
def test_404(self):
self.check_url('/404', expected_status=404)
def test_post(self):
body = 'foo=bar'
# Without an explicit Content-Length redbot will try to send the
# request chunked.
self.check_url(
'/post', method='POST', body=body,
headers=[('Content-Length', str(len(body))),
('Content-Type', 'application/x-www-form-urlencoded')],
expected_status=303)
def test_chunked(self):
self.check_url('/chunked')
class DefaultHTTPTest(AsyncHTTPTestCase, LogTrapTestCase, TestMixin):
def get_app(self):
return Application(self.get_handlers(), **self.get_app_kwargs())
class GzipHTTPTest(AsyncHTTPTestCase, LogTrapTestCase, TestMixin):
def get_app(self):
return Application(self.get_handlers(), gzip=True, **self.get_app_kwargs())
def get_allowed_errors(self):
return super(GzipHTTPTest, self).get_allowed_errors() + [
# TODO: The Etag is supposed to change when Content-Encoding is
# used. This should be fixed, but it's difficult to do with the
# way GZipContentEncoding fits into the pipeline, and in practice
# it doesn't seem likely to cause any problems as long as we're
# using the correct Vary header.
rs.VARY_ETAG_DOESNT_CHANGE,
]
if __name__ == '__main__':
parse_command_line()
unittest.main()
|
wrap_rank.py
|
"""
wrap_rank.py prefixes every line of output from a worker process with the rank
that emitted it.
In distributed training, the rank prefix added by wrap_rank.py is necessary for
the WebUI log viewer's filter-by-rank feature to work.
Additionally, when used in a Determined container, wrap_rank.py redirects stdout
and stderr of the worker process to the stdout and stderr of the container. The
purpose of this is to save network bandwidth when launchers like mpirun or
horovodrun are used, as they often are configured to send all logs from worker
nodes to the chief node over the network. This may be disabled with the
``--no-redirect-stdio`` flag.
"""
import argparse
import contextlib
import os
import subprocess
import sys
import threading
from typing import BinaryIO, List
from determined import constants
def forward_stream(src_stream: BinaryIO, dst_stream: BinaryIO, rank: str) -> None:
for line in iter(src_stream.readline, b""):
line = f"[rank={rank}] ".encode() + line
os.write(dst_stream.fileno(), line)
def run_all(ts: List[threading.Thread]) -> None:
for t in ts:
t.start()
for t in ts:
t.join()
def main() -> int:
parser = argparse.ArgumentParser(
usage="wrap_rank.py [-h] [--no-redirect-stdio] RANK SCRIPT...",
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--no-redirect-stdio", action="store_true")
parser.add_argument(
"rank",
metavar="RANK",
help=(
"Can be an integer rank or a comma-separated list of "
"names of environment variables which are tried, in order, "
"to determine an integer rank."
),
)
parser.add_argument(
"script", nargs=argparse.REMAINDER, metavar="SCRIPT...", help="The worker command."
)
args = parser.parse_args()
if set("0123456789") >= set(args.rank):
# Rank is provided as a number.
rank = int(args.rank)
else:
# Rank is provided as the name of an environment variable.
for r in args.rank.split(","):
if r in os.environ:
rank = int(os.environ[r])
break
else:
print(
f"rank environment variable is set to {args.rank}, but it is not in os.environ",
file=sys.stderr,
)
return 1
proc = subprocess.Popen(args.script, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
with contextlib.ExitStack() as exit_stack:
if os.path.exists(constants.CONTAINER_STDOUT) and not args.no_redirect_stdio:
stdout = exit_stack.enter_context(open(constants.CONTAINER_STDOUT, "w"))
stderr = exit_stack.enter_context(open(constants.CONTAINER_STDERR, "w"))
else:
stdout = sys.stdout
stderr = sys.stderr
run_all(
[
threading.Thread(target=forward_stream, args=(proc.stdout, stdout, rank)),
threading.Thread(target=forward_stream, args=(proc.stderr, stderr, rank)),
]
)
return proc.wait()
if __name__ == "__main__":
sys.exit(main())
|
utils.py
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import importlib
import os
import queue
import sys
import tempfile
import time
import traceback
import unittest
import warnings
from io import BytesIO
from subprocess import PIPE, Popen
from typing import Optional
from urllib.error import ContentTooShortError, HTTPError, URLError
import numpy as np
import torch
import torch.distributed as dist
from monai.config.deviceconfig import USE_COMPILED
from monai.data import create_test_image_2d, create_test_image_3d
from monai.utils import ensure_tuple, optional_import, set_determinism
from monai.utils.module import get_torch_version_tuple
nib, _ = optional_import("nibabel")
ver, has_pkg_res = optional_import("pkg_resources", name="parse_version")
quick_test_var = "QUICKTEST"
def test_pretrained_networks(network, input_param, device):
try:
net = network(**input_param).to(device)
except (URLError, HTTPError, ContentTooShortError) as e:
raise unittest.SkipTest(e)
return net
def test_is_quick():
return os.environ.get(quick_test_var, "").lower() == "true"
def skip_if_quick(obj):
"""
Skip the unit tests if environment variable `quick_test_var=true`.
For example, the user can skip the relevant tests by setting ``export QUICKTEST=true``.
"""
is_quick = test_is_quick()
return unittest.skipIf(is_quick, "Skipping slow tests")(obj)
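# Illustrative usage (not part of the original file): decorate an expensive test
# so it is skipped when QUICKTEST=true.
#
#     @skip_if_quick
#     def test_full_volume_pipeline(self):
#         ...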
class SkipIfNoModule:
"""Decorator to be used if test should be skipped
when optional module is not present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_missing = not optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_missing, f"optional module not present: {self.module_name}")(obj)
class SkipIfModule:
"""Decorator to be used if test should be skipped
when optional module is present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_avail = optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_avail, f"Skipping because optional module present: {self.module_name}")(obj)
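# Illustrative usage (not part of the original file): gate a test on an optional
# dependency; "nibabel" is only an example module name here.
#
#     @SkipIfNoModule("nibabel")
#     def test_nifti_loading(self):
#         ...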
def skip_if_no_cpp_extension(obj):
"""
Skip the unit tests if the cpp extension is not available
"""
return unittest.skipUnless(USE_COMPILED, "Skipping cpp extension tests")(obj)
def skip_if_no_cuda(obj):
"""
Skip the unit tests if torch.cuda.is_available is False
"""
return unittest.skipUnless(torch.cuda.is_available(), "Skipping CUDA-based tests")(obj)
def skip_if_windows(obj):
"""
Skip the unit tests if platform is win32
"""
return unittest.skipIf(sys.platform == "win32", "Skipping tests on Windows")(obj)
class SkipIfBeforePyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions older than that given."""
def __init__(self, pytorch_version_tuple):
self.min_version = pytorch_version_tuple
if has_pkg_res:
self.version_too_old = ver(torch.__version__) < ver(".".join(map(str, self.min_version)))
else:
self.version_too_old = get_torch_version_tuple() < self.min_version
def __call__(self, obj):
return unittest.skipIf(
self.version_too_old, f"Skipping tests that fail on PyTorch versions before: {self.min_version}"
)(obj)
class SkipIfAtLeastPyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions newer than or equal to that given."""
def __init__(self, pytorch_version_tuple):
self.max_version = pytorch_version_tuple
if has_pkg_res:
self.version_too_new = ver(torch.__version__) >= ver(".".join(map(str, self.max_version)))
else:
self.version_too_new = get_torch_version_tuple() >= self.max_version
def __call__(self, obj):
return unittest.skipIf(
self.version_too_new, f"Skipping tests that fail on PyTorch versions at least: {self.max_version}"
)(obj)
def make_nifti_image(array, affine=None):
"""
Create a temporary nifti image on the disk and return the image name.
User is responsible for deleting the temporary file when done with it.
"""
if affine is None:
affine = np.eye(4)
test_image = nib.Nifti1Image(array, affine)
temp_f, image_name = tempfile.mkstemp(suffix=".nii.gz")
nib.save(test_image, image_name)
os.close(temp_f)
return image_name
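# Illustrative usage (not part of the original file): the caller owns the
# temporary file and should remove it when finished, as the docstring notes.
#
#     image_name = make_nifti_image(np.zeros((8, 8, 8), dtype=np.float32))
#     try:
#         ...  # read or transform image_name
#     finally:
#         os.remove(image_name)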
def make_rand_affine(ndim: int = 3, random_state: Optional[np.random.RandomState] = None):
"""Create random affine transformation (with values == -1, 0 or 1)."""
rs = np.random if random_state is None else random_state
vals = rs.choice([-1, 1], size=ndim)
positions = rs.choice(range(ndim), size=ndim, replace=False)
af = np.zeros([ndim + 1, ndim + 1])
af[ndim, ndim] = 1
for i, (v, p) in enumerate(zip(vals, positions)):
af[i, p] = v
return af
class DistTestCase(unittest.TestCase):
"""
testcase without _outcome, so that it's picklable.
"""
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict["_outcome"]
return self_dict
def __setstate__(self, data_dict):
self.__dict__.update(data_dict)
class DistCall:
"""
Wrap a test case so that it will run in multiple processes on a single machine using `torch.distributed`.
It is designed to be used with `tests.utils.DistTestCase`.
Usage:
decorate a unittest testcase method with a `DistCall` instance::
class MyTests(unittest.TestCase):
@DistCall(nnodes=1, nproc_per_node=3, master_addr="localhost")
def test_compute(self):
...
the `test_compute` method should trigger different worker logic according to `dist.get_rank()`.
Multi-node tests require a fixed master_addr:master_port, with node_rank set manually in multiple scripts
or from environment variable "NODE_RANK".
"""
def __init__(
self,
nnodes: int = 1,
nproc_per_node: int = 1,
master_addr: str = "localhost",
master_port: Optional[int] = None,
node_rank: Optional[int] = None,
timeout=60,
init_method=None,
backend: Optional[str] = None,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
verbose: bool = False,
):
"""
Args:
nnodes: The number of nodes to use for distributed call.
nproc_per_node: The number of processes to call on each node.
master_addr: Master node (rank 0)'s address, should be either the IP address or the hostname of node 0.
master_port: Master node (rank 0)'s free port.
node_rank: The rank of the node, this could be set via environment variable "NODE_RANK".
timeout: Timeout for operations executed against the process group.
init_method: URL specifying how to initialize the process group.
Default is "env://" or "file:///d:/a_temp" (windows) if unspecified.
backend: The backend to use. Depending on build-time configurations,
valid values include ``mpi``, ``gloo``, and ``nccl``.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
verbose: whether to print NCCL debug info.
"""
self.nnodes = int(nnodes)
self.nproc_per_node = int(nproc_per_node)
if self.nnodes < 1 or self.nproc_per_node < 1:
raise ValueError(
f"number of nodes and processes per node must be >= 1, got {self.nnodes} and {self.nproc_per_node}"
)
self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else int(node_rank)
self.master_addr = master_addr
self.master_port = np.random.randint(10000, 20000) if master_port is None else master_port
if backend is None:
self.backend = "nccl" if torch.distributed.is_nccl_available() and torch.cuda.is_available() else "gloo"
else:
self.backend = backend
self.init_method = init_method
if self.init_method is None and sys.platform == "win32":
self.init_method = "file:///d:/a_temp"
self.timeout = datetime.timedelta(0, timeout)
self.daemon = daemon
self.method = method
self._original_method = torch.multiprocessing.get_start_method(allow_none=False)
self.verbose = verbose
def run_process(self, func, local_rank, args, kwargs, results):
_env = os.environ.copy() # keep the original system env
try:
os.environ["MASTER_ADDR"] = self.master_addr
os.environ["MASTER_PORT"] = str(self.master_port)
os.environ["LOCAL_RANK"] = str(local_rank)
if self.verbose:
os.environ["NCCL_DEBUG"] = "INFO"
os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
os.environ["NCCL_BLOCKING_WAIT"] = str(1)
os.environ["OMP_NUM_THREADS"] = str(1)
os.environ["WORLD_SIZE"] = str(self.nproc_per_node * self.nnodes)
os.environ["RANK"] = str(self.nproc_per_node * self.node_rank + local_rank)
if torch.cuda.is_available():
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
torch.cuda.set_device(int(local_rank))
dist.init_process_group(
backend=self.backend,
init_method=self.init_method,
timeout=self.timeout,
world_size=int(os.environ["WORLD_SIZE"]),
rank=int(os.environ["RANK"]),
)
func(*args, **kwargs)
# the primary node lives longer to
# avoid _store_based_barrier, RuntimeError: Broken pipe
# as the TCP store daemon is on the rank 0
if int(os.environ["RANK"]) == 0:
time.sleep(0.1)
results.put(True)
except Exception as e:
results.put(False)
raise e
finally:
os.environ.clear()
os.environ.update(_env)
try:
dist.destroy_process_group()
except RuntimeError as e:
warnings.warn(f"While closing process group: {e}.")
def __call__(self, obj):
if not torch.distributed.is_available():
return unittest.skipIf(True, "Skipping distributed tests because not torch.distributed.is_available()")(obj)
if torch.cuda.is_available() and torch.cuda.device_count() < self.nproc_per_node:
return unittest.skipIf(
True,
f"Skipping distributed tests because it requires {self.nnodes} devices "
f"but got {torch.cuda.device_count()}",
)(obj)
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
if self.method:
try:
torch.multiprocessing.set_start_method(self.method, force=True)
except (RuntimeError, ValueError):
pass
processes = []
results = torch.multiprocessing.Queue()
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
for proc_rank in range(self.nproc_per_node):
p = torch.multiprocessing.Process(
target=self.run_process, args=(func, proc_rank, args, kwargs, results)
)
if self.daemon is not None:
p.daemon = self.daemon
p.start()
processes.append(p)
for p in processes:
p.join()
if self.method:
try:
torch.multiprocessing.set_start_method(self._original_method, force=True)
except (RuntimeError, ValueError):
pass
assert results.get(), "Distributed call failed."
return _wrapper
class TimedCall:
"""
Wrap a test case so that it will run in a new process, raises a TimeoutError if the decorated method takes
more than `seconds` to finish. It is designed to be used with `tests.utils.DistTestCase`.
"""
def __init__(
self,
seconds: float = 60.0,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
force_quit: bool = True,
skip_timing=False,
):
"""
Args:
seconds: timeout seconds.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
force_quit: whether to terminate the child process when `seconds` elapsed.
skip_timing: whether to skip the timing constraint.
this is useful to include some system conditions such as
`torch.cuda.is_available()`.
"""
self.timeout_seconds = seconds
self.daemon = daemon
self.force_quit = force_quit
self.skip_timing = skip_timing
self.method = method
self._original_method = torch.multiprocessing.get_start_method(allow_none=False) # remember the default method
@staticmethod
def run_process(func, args, kwargs, results):
try:
output = func(*args, **kwargs)
results.put(output)
except Exception as e:
e.traceback = traceback.format_exc()
results.put(e)
def __call__(self, obj):
if self.skip_timing:
return obj
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
if self.method:
try:
torch.multiprocessing.set_start_method(self.method, force=True)
except (RuntimeError, ValueError):
pass
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
results = torch.multiprocessing.Queue()
p = torch.multiprocessing.Process(target=TimedCall.run_process, args=(func, args, kwargs, results))
if self.daemon is not None:
p.daemon = self.daemon
p.start()
p.join(timeout=self.timeout_seconds)
timeout_error = None
try:
if p.is_alive():
# create an Exception
timeout_error = torch.multiprocessing.TimeoutError(
f"'{obj.__name__}' in '{obj.__module__}' did not finish in {self.timeout_seconds}s."
)
if self.force_quit:
p.terminate()
else:
warnings.warn(
f"TimedCall: deadline ({self.timeout_seconds}s) "
f"reached but waiting for {obj.__name__} to finish."
)
finally:
p.join()
res = None
try:
res = results.get(block=False)
except queue.Empty: # no result returned, took too long
pass
finally:
if self.method:
try:
torch.multiprocessing.set_start_method(self._original_method, force=True)
except (RuntimeError, ValueError):
pass
if isinstance(res, Exception): # other errors from obj
if hasattr(res, "traceback"):
raise RuntimeError(res.traceback) from res
raise res
            if timeout_error:  # deadline exceeded - raised even if a late result arrived (when force_quit is False)
raise timeout_error
return res
return _wrapper
_original_funcs = {}
def _cache_original_func(obj) -> None:
"""cache the original function by name, so that the decorator doesn't shadow it."""
global _original_funcs
_original_funcs[obj.__name__] = obj
def _call_original_func(name, module, *args, **kwargs):
if name not in _original_funcs:
_original_module = importlib.import_module(module) # reimport, refresh _original_funcs
if not hasattr(_original_module, name):
# refresh module doesn't work
raise RuntimeError(f"Could not recover the original {name} from {module}: {_original_funcs}.")
f = _original_funcs[name]
return f(*args, **kwargs)
class NumpyImageTestCase2D(unittest.TestCase):
im_shape = (128, 64)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_2d(self.im_shape[0], self.im_shape[1], 4, 20, 0, self.num_classes)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
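        # im[None, None] prepends batch and channel axes of size 1 (NCHW layout); seg1 is the
        # binarized mask as float32 and segn keeps the integer class labels.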
class TorchImageTestCase2D(NumpyImageTestCase2D):
def setUp(self):
NumpyImageTestCase2D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
class NumpyImageTestCase3D(unittest.TestCase):
im_shape = (64, 48, 80)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_3d(self.im_shape[0], self.im_shape[1], self.im_shape[2], 4, 20, 0, self.num_classes)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase3D(NumpyImageTestCase3D):
def setUp(self):
NumpyImageTestCase3D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
def test_script_save(net, *inputs, eval_nets=True, device=None, rtol=1e-4):
"""
Test the ability to save `net` as a Torchscript object, reload it, and apply inference. The value `inputs` is
forward-passed through the original and loaded copy of the network and their results returned. Both `net` and its
reloaded copy are set to evaluation mode if `eval_nets` is True. The forward pass for both is done without
gradient accumulation.
    The comparison currently runs on the CPU (see the TODO note below).
"""
    # TODO: it would be nice to use the GPU when available
    # (device = "cuda" if torch.cuda.is_available() else "cpu"),
    # but that currently causes CI failures, so force the comparison onto the CPU for now.
    device = "cpu"
# Convert to device
inputs = [i.to(device) for i in inputs]
scripted = torch.jit.script(net.cpu())
buffer = scripted.save_to_buffer()
reloaded_net = torch.jit.load(BytesIO(buffer)).to(device)
net.to(device)
if eval_nets:
net.eval()
reloaded_net.eval()
with torch.no_grad():
set_determinism(seed=0)
result1 = net(*inputs)
result2 = reloaded_net(*inputs)
set_determinism(seed=None)
# convert results to tuples if needed to allow iterating over pairs of outputs
result1 = ensure_tuple(result1)
result2 = ensure_tuple(result2)
for i, (r1, r2) in enumerate(zip(result1, result2)):
if None not in (r1, r2): # might be None
np.testing.assert_allclose(
r1.detach().cpu().numpy(),
r2.detach().cpu().numpy(),
rtol=rtol,
atol=0,
err_msg=f"failed on comparison number: {i}",
)
def query_memory(n=2):
"""
    Find the n most idle GPUs and return their device ids as a comma-separated string.
"""
bash_string = "nvidia-smi --query-gpu=power.draw,temperature.gpu,memory.used --format=csv,noheader,nounits"
try:
p1 = Popen(bash_string.split(), stdout=PIPE)
output, error = p1.communicate()
free_memory = [x.split(",") for x in output.decode("utf-8").split("\n")[:-1]]
free_memory = np.asarray(free_memory, dtype=float).T
        free_memory[1] += free_memory[0]  # fold power draw into the temperature row; lexsort then ranks by memory used first
ids = np.lexsort(free_memory)[:n]
except (FileNotFoundError, TypeError, IndexError):
ids = range(n) if isinstance(n, int) else []
return ",".join([f"{int(x)}" for x in ids])
if __name__ == "__main__":
print(query_memory())
|
lab8_e.py
|
from sys import setrecursionlimit
import threading
import queue
setrecursionlimit(10 ** 9)
threading.stack_size(67108864)
def main():
file_input, file_output = open('pathbge1.in', 'r'), open('pathbge1.out','w')
    # Non-recursive breadth-first search (BFS) that computes the shortest distance from the source to every vertex
def bfs(graph, source, n):
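        # Example (sketch): with 3 vertices and edges "1 2" and "2 3" in the input,
        # bfs(graph, 0, 3) returns [0, 1, 2] - the distance of every vertex from vertex 0.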
visited, distances, que = [], [0] * n, queue.Queue()
que.put(source)
while not que.empty():
temp = que.get()
visited.append(temp)
for index in graph.get(temp):
if distances[index] == 0:
distances[index] = distances[temp] + 1
que.put(index)
distances[0] = 0
return distances
n, m = map(int, file_input.readline().split())
graph = {}
    # Build adjacency lists (0-indexed vertices) in a dict for the undirected graph
for _ in range(m):
current = list(map(int, file_input.readline().split()))
if graph.get(current[0] - 1) is not None and current[1] - 1 not in graph.get(current[0] - 1):
graph.update({current[0] - 1 : graph.get(current[0] - 1) + [current[1] - 1]})
elif graph.get(current[0] - 1) is None:
graph.update({current[0] - 1 : [current[1] - 1]})
if graph.get(current[1] - 1) is not None and current[0] - 1 not in graph.get(current[1] - 1):
graph.update({current[1] - 1 : graph.get(current[1] - 1) + [current[0] - 1]})
elif graph.get(current[1] - 1) is None:
graph.update({current[1] - 1 : [current[0] - 1]})
# print(graph)
print(*bfs(graph, 0, n), file=file_output)
file_output.close()
thread = threading.Thread(target=main)
thread.start()
|
GeniusLyricsGUI.py
|
import os
import threading
from tkinter import *
from tkinter import messagebox
from GeniusLyrics import search_song_lyrics, song_dict
class GeniusLyricsGUI:
def __init__(self, tk_root: Tk):
tk_root.resizable(width=False, height=False)
self.lyrics_frame = Frame(tk_root)
self.songName = StringVar()
self.artistName = StringVar()
self.top_section = LabelFrame(self.lyrics_frame, font="Calibri", pady=2)
Label(self.top_section, text="Song").pack(side=LEFT, padx=4)
self.songEntry = Entry(self.top_section, textvariable=self.songName)
self.songEntry.pack(side=LEFT, padx=4)
Label(self.top_section, text="Artist").pack(side=LEFT, padx=4)
        self.artistEntry = Entry(self.top_section, textvariable=self.artistName)
        self.artistEntry.pack(side=LEFT, padx=4)
self.searchButton = Button(self.top_section, text="Search")
self.searchButton.bind("<Button-1>", self.search)
tk_root.bind("<Return>", self.search)
self.searchButton.pack(side=LEFT)
# create scrollbar
self.scrollbar = Scrollbar(self.lyrics_frame)
self.top_section.pack(side=TOP)
self.output = Text(self.lyrics_frame, font="Calibri 11", width=self.top_section.winfo_width(), height=25)
self.lyrics_frame.pack()
def search(self, event=None):
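        # The lyrics lookup runs on a background thread so the Tk mainloop stays responsive while
        # the request is in flight; the scrollbar and output box are only packed once a result arrives.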
def thread_func():
os.system("cls")
print(f"Song: {self.songName.get()}\nArtist: {self.artistName.get()}\n")
lyrics = search_song_lyrics(self.songName.get(), self.artistName.get())
if lyrics:
if lyrics[0] == "\n" and lyrics[1] == "\n":
lyrics = lyrics[2:]
print(lyrics)
# pack and attach to textbox
self.scrollbar.pack(side=RIGHT, fill=Y)
self.output.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.output.yview)
# pack output
self.output.pack(side=BOTTOM, fill=BOTH)
self.output.delete(1.0, "end")
self.output.insert(1.0, lyrics)
else:
messagebox.showinfo(
"Failed",
f"Couldn't find lyrics for\n'{self.songName.get()}' by '{self.artistName.get()}'"
)
search_thread = threading.Thread(target=thread_func)
search_thread.start()
def main():
root = Tk()
root.title('Music Metadata & Lyrics')
root.resizable(width=False, height=False)
GeniusLyricsGUI(root)
root.mainloop()
if __name__ == "__main__":
main()
|
test_win32file.py
|
import unittest
from pywin32_testutil import str2bytes, TestSkipped, testmain
import win32api, win32file, win32pipe, pywintypes, winerror, win32event
import win32con, ntsecuritycon
import sys
import os
import tempfile
import threading
import time
import shutil
import socket
import datetime
import random
import win32timezone
try:
set
except NameError:
from sets import Set as set
class TestReadBuffer(unittest.TestCase):
def testLen(self):
buffer = win32file.AllocateReadBuffer(1)
self.failUnlessEqual(len(buffer), 1)
def testSimpleIndex(self):
val = str2bytes('\xFF')
buffer = win32file.AllocateReadBuffer(1)
buffer[0] = val
self.failUnlessEqual(buffer[0], val)
def testSimpleSlice(self):
buffer = win32file.AllocateReadBuffer(2)
val = str2bytes('\0\0')
buffer[:2] = val
self.failUnlessEqual(buffer[0:2], val)
class TestSimpleOps(unittest.TestCase):
def testSimpleFiles(self):
fd, filename = tempfile.mkstemp()
os.close(fd)
os.unlink(filename)
handle = win32file.CreateFile(filename, win32file.GENERIC_WRITE, 0, None, win32con.CREATE_NEW, 0, None)
test_data = str2bytes("Hello\0there")
try:
win32file.WriteFile(handle, test_data)
handle.Close()
# Try and open for read
handle = win32file.CreateFile(filename, win32file.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
rc, data = win32file.ReadFile(handle, 1024)
self.assertEquals(data, test_data)
finally:
handle.Close()
try:
os.unlink(filename)
except os.error:
pass
# A simple test using normal read/write operations.
def testMoreFiles(self):
# Create a file in the %TEMP% directory.
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
# Set a flag to delete the file automatically when it is closed.
fileFlags = win32file.FILE_FLAG_DELETE_ON_CLOSE
h = win32file.CreateFile( testName, desiredAccess, win32file.FILE_SHARE_READ, None, win32file.CREATE_ALWAYS, fileFlags, 0)
# Write a known number of bytes to the file.
data = str2bytes("z") * 1025
win32file.WriteFile(h, data)
self.failUnless(win32file.GetFileSize(h) == len(data), "WARNING: Written file does not have the same size as the length of the data in it!")
# Ensure we can read the data back.
win32file.SetFilePointer(h, 0, win32file.FILE_BEGIN)
hr, read_data = win32file.ReadFile(h, len(data)+10) # + 10 to get anything extra
self.failUnless(hr==0, "Readfile returned %d" % hr)
self.failUnless(read_data == data, "Read data is not what we wrote!")
# Now truncate the file at 1/2 its existing size.
newSize = len(data)//2
win32file.SetFilePointer(h, newSize, win32file.FILE_BEGIN)
win32file.SetEndOfFile(h)
self.failUnlessEqual(win32file.GetFileSize(h), newSize)
# GetFileAttributesEx/GetFileAttributesExW tests.
self.failUnlessEqual(win32file.GetFileAttributesEx(testName), win32file.GetFileAttributesExW(testName))
attr, ct, at, wt, size = win32file.GetFileAttributesEx(testName)
self.failUnless(size==newSize,
"Expected GetFileAttributesEx to return the same size as GetFileSize()")
self.failUnless(attr==win32file.GetFileAttributes(testName),
"Expected GetFileAttributesEx to return the same attributes as GetFileAttributes")
h = None # Close the file by removing the last reference to the handle!
self.failUnless(not os.path.isfile(testName), "After closing the file, it still exists!")
def testFilePointer(self):
# via [ 979270 ] SetFilePointer fails with negative offset
# Create a file in the %TEMP% directory.
filename = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
f = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0,
None,
win32file.CREATE_ALWAYS,
win32file.FILE_ATTRIBUTE_NORMAL,
0)
try:
#Write some data
data = str2bytes('Some data')
(res, written) = win32file.WriteFile(f, data)
self.failIf(res)
self.assertEqual(written, len(data))
#Move at the beginning and read the data
win32file.SetFilePointer(f, 0, win32file.FILE_BEGIN)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.assertEqual(s, data)
#Move at the end and read the data
win32file.SetFilePointer(f, -len(data), win32file.FILE_END)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.failUnlessEqual(s, data)
finally:
f.Close()
os.unlink(filename)
def testFileTimesTimezones(self):
if not issubclass(pywintypes.TimeType, datetime.datetime):
# maybe should report 'skipped', but that's not quite right as
# there is nothing you can do to avoid it being skipped!
return
filename = tempfile.mktemp("-testFileTimes")
now_utc = win32timezone.utcnow()
now_local = now_utc.astimezone(win32timezone.TimeZoneInfo.local())
h = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None, win32file.CREATE_ALWAYS, 0, 0)
try:
win32file.SetFileTime(h, now_utc, now_utc, now_utc)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_local, ct)
self.failUnlessEqual(now_local, at)
self.failUnlessEqual(now_local, wt)
# and the reverse - set local, check against utc
win32file.SetFileTime(h, now_local, now_local, now_local)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_utc, ct)
self.failUnlessEqual(now_utc, at)
self.failUnlessEqual(now_utc, wt)
finally:
h.close()
os.unlink(filename)
def testFileTimes(self):
if issubclass(pywintypes.TimeType, datetime.datetime):
from win32timezone import TimeZoneInfo
now = datetime.datetime.now(tz=TimeZoneInfo.local())
nowish = now + datetime.timedelta(seconds=1)
later = now + datetime.timedelta(seconds=120)
else:
rc, tzi = win32api.GetTimeZoneInformation()
bias = tzi[0]
if rc==2: # daylight-savings is in effect.
bias += tzi[-1]
bias *= 60 # minutes to seconds...
tick = int(time.time())
now = pywintypes.Time(tick+bias)
nowish = pywintypes.Time(tick+bias+1)
later = pywintypes.Time(tick+bias+120)
filename = tempfile.mktemp("-testFileTimes")
# Windows docs the 'last time' isn't valid until the last write
# handle is closed - so create the file, then re-open it to check.
open(filename,"w").close()
f = win32file.CreateFile(filename, win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None,
win32con.OPEN_EXISTING, 0, None)
try:
ct, at, wt = win32file.GetFileTime(f)
self.failUnless(ct >= now, "File was created in the past - now=%s, created=%s" % (now, ct))
self.failUnless( now <= ct <= nowish, (now, ct))
self.failUnless(wt >= now, "File was written-to in the past now=%s, written=%s" % (now,wt))
self.failUnless( now <= wt <= nowish, (now, wt))
# Now set the times.
win32file.SetFileTime(f, later, later, later)
# Get them back.
ct, at, wt = win32file.GetFileTime(f)
# XXX - the builtin PyTime type appears to be out by a dst offset.
# just ignore that type here...
if issubclass(pywintypes.TimeType, datetime.datetime):
self.failUnlessEqual(ct, later)
self.failUnlessEqual(at, later)
self.failUnlessEqual(wt, later)
finally:
f.Close()
os.unlink(filename)
class TestGetFileInfoByHandleEx(unittest.TestCase):
__handle = __filename = None
def setUp(self):
fd, self.__filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
if self.__handle is not None:
self.__handle.Close()
if self.__filename is not None:
try:
os.unlink(self.__filename)
except OSError:
pass
self.__handle = self.__filename = None
def testFileBasicInfo(self):
attr = win32file.GetFileAttributes(self.__filename)
f = win32file.CreateFile(self.__filename, win32file.GENERIC_READ, 0, None,
win32con.OPEN_EXISTING, 0, None)
self.__handle = f
ct, at, wt = win32file.GetFileTime(f)
# bug #752: this throws ERROR_BAD_LENGTH (24) in x86 binaries of build 221
basic_info = win32file.GetFileInformationByHandleEx(f, win32file.FileBasicInfo)
self.assertEqual(ct, basic_info['CreationTime'])
self.assertEqual(at, basic_info['LastAccessTime'])
self.assertEqual(wt, basic_info['LastWriteTime'])
self.assertEqual(attr, basic_info['FileAttributes'])
class TestOverlapped(unittest.TestCase):
def testSimpleOverlapped(self):
# Create a file in the %TEMP% directory.
import win32event
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_WRITE
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
        # Create the file and write a large amount of data to it.
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.CREATE_ALWAYS, 0, 0)
chunk_data = str2bytes("z") * 0x8000
num_loops = 512
expected_size = num_loops * len(chunk_data)
for i in range(num_loops):
win32file.WriteFile(h, chunk_data, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(chunk_data)
h.Close()
# Now read the data back overlapped
overlapped = pywintypes.OVERLAPPED()
evt = win32event.CreateEvent(None, 0, 0, None)
overlapped.hEvent = evt
desiredAccess = win32file.GENERIC_READ
h = win32file.CreateFile( testName, desiredAccess, 0, None, win32file.OPEN_EXISTING, 0, 0)
buffer = win32file.AllocateReadBuffer(0xFFFF)
while 1:
try:
hr, data = win32file.ReadFile(h, buffer, overlapped)
win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
overlapped.Offset = overlapped.Offset + len(data)
if not data is buffer:
self.fail("Unexpected result from ReadFile - should be the same buffer we passed it")
except win32api.error:
break
h.Close()
def testCompletionPortsMultiple(self):
# Mainly checking that we can "associate" an existing handle. This
# failed in build 203.
ioport = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
0, 0, 0)
socks = []
for PORT in range(9123, 9125):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', PORT))
sock.listen(1)
socks.append(sock)
new = win32file.CreateIoCompletionPort(sock.fileno(), ioport, PORT, 0)
assert new is ioport
for s in socks:
s.close()
hv = int(ioport)
ioport = new = None
# The handle itself should be closed now (unless we leak references!)
# Check that.
try:
win32file.CloseHandle(hv)
raise RuntimeError("Expected close to fail!")
except win32file.error as details:
self.failUnlessEqual(details.winerror, winerror.ERROR_INVALID_HANDLE)
def testCompletionPortsQueued(self):
class Foo: pass
io_req_port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
overlapped = pywintypes.OVERLAPPED()
overlapped.object = Foo()
win32file.PostQueuedCompletionStatus(io_req_port, 0, 99, overlapped)
errCode, bytes, key, overlapped = \
win32file.GetQueuedCompletionStatus(io_req_port, win32event.INFINITE)
self.failUnlessEqual(errCode, 0)
self.failUnless(isinstance(overlapped.object, Foo))
def _IOCPServerThread(self, handle, port, drop_overlapped_reference):
overlapped = pywintypes.OVERLAPPED()
win32pipe.ConnectNamedPipe(handle, overlapped)
if drop_overlapped_reference:
# Be naughty - the overlapped object is now dead, but
# GetQueuedCompletionStatus will still find it. Our check of
# reference counting should catch that error.
overlapped = None
# even if we fail, be sure to close the handle; prevents hangs
# on Vista 64...
try:
self.failUnlessRaises(RuntimeError,
win32file.GetQueuedCompletionStatus, port, -1)
finally:
handle.Close()
return
result = win32file.GetQueuedCompletionStatus(port, -1)
ol2 = result[-1]
self.failUnless(ol2 is overlapped)
data = win32file.ReadFile(handle, 512)[1]
win32file.WriteFile(handle, data)
def testCompletionPortsNonQueued(self, test_overlapped_death = 0):
# In 204 we had a reference count bug when OVERLAPPED objects were
# associated with a completion port other than via
# PostQueuedCompletionStatus. This test is based on the reproduction
# reported with that bug.
# Create the pipe.
BUFSIZE = 512
pipe_name = r"\\.\pipe\pywin32_test_pipe"
handle = win32pipe.CreateNamedPipe(pipe_name,
win32pipe.PIPE_ACCESS_DUPLEX|
win32file.FILE_FLAG_OVERLAPPED,
win32pipe.PIPE_TYPE_MESSAGE|
win32pipe.PIPE_READMODE_MESSAGE|
win32pipe.PIPE_WAIT,
1, BUFSIZE, BUFSIZE,
win32pipe.NMPWAIT_WAIT_FOREVER,
None)
# Create an IOCP and associate it with the handle.
port = win32file.CreateIoCompletionPort(-1, 0, 0, 0)
win32file.CreateIoCompletionPort(handle, port, 1, 0)
t = threading.Thread(target=self._IOCPServerThread, args=(handle,port, test_overlapped_death))
t.setDaemon(True) # avoid hanging entire test suite on failure.
t.start()
try:
time.sleep(0.1) # let thread do its thing.
try:
win32pipe.CallNamedPipe(r"\\.\pipe\pywin32_test_pipe", str2bytes("Hello there"), BUFSIZE, 0)
except win32pipe.error:
# Testing for overlapped death causes this
if not test_overlapped_death:
raise
finally:
if not test_overlapped_death:
handle.Close()
t.join(3)
self.failIf(t.isAlive(), "thread didn't finish")
def testCompletionPortsNonQueuedBadReference(self):
self.testCompletionPortsNonQueued(True)
def testHashable(self):
overlapped = pywintypes.OVERLAPPED()
d = {}
d[overlapped] = "hello"
self.failUnlessEqual(d[overlapped], "hello")
def testComparable(self):
overlapped = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped, overlapped)
# ensure we explicitly test the operators.
self.failUnless(overlapped == overlapped)
self.failIf(overlapped != overlapped)
def testComparable2(self):
# 2 overlapped objects compare equal if their contents are the same.
overlapped1 = pywintypes.OVERLAPPED()
overlapped2 = pywintypes.OVERLAPPED()
self.failUnlessEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failUnless(overlapped1 == overlapped2)
self.failIf(overlapped1 != overlapped2)
# now change something in one of them - should no longer be equal.
overlapped1.hEvent = 1
self.failIfEqual(overlapped1, overlapped2)
# ensure we explicitly test the operators.
self.failIf(overlapped1 == overlapped2)
self.failUnless(overlapped1 != overlapped2)
class TestSocketExtensions(unittest.TestCase):
def acceptWorker(self, port, running_event, stopped_event):
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listener.bind(('', port))
listener.listen(200)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
# We used to allow strings etc to be passed here, and they would be
# modified! Obviously this is evil :)
buffer = " " * 1024 # EVIL - SHOULD NOT BE ALLOWED.
self.assertRaises(TypeError, win32file.AcceptEx, listener, accepter, buffer, overlapped)
# This is the correct way to allocate the buffer...
buffer = win32file.AllocateReadBuffer(1024)
rc = win32file.AcceptEx(listener, accepter, buffer, overlapped)
self.failUnlessEqual(rc, winerror.ERROR_IO_PENDING)
# Set the event to say we are all ready
running_event.set()
# and wait for the connection.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
#fam, loc, rem = win32file.GetAcceptExSockaddrs(accepter, buffer)
accepter.send(buffer[:nbytes])
# NOT set in a finally - this means *successfully* stopped!
stopped_event.set()
def testAcceptEx(self):
port = 4680
running = threading.Event()
stopped = threading.Event()
t = threading.Thread(target=self.acceptWorker, args=(port, running,stopped))
t.start()
running.wait(2)
if not running.isSet():
self.fail("AcceptEx Worker thread failed to start")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', port))
win32file.WSASend(s, str2bytes("hello"), None)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# Like above - WSARecv used to allow strings as the receive buffer!!
buffer = " " * 10
self.assertRaises(TypeError, win32file.WSARecv, s, buffer, overlapped)
# This one should work :)
buffer = win32file.AllocateReadBuffer(10)
win32file.WSARecv(s, buffer, overlapped)
nbytes = win32file.GetOverlappedResult(s.fileno(), overlapped, True)
got = buffer[:nbytes]
self.failUnlessEqual(got, str2bytes("hello"))
# thread should have stopped
stopped.wait(2)
if not stopped.isSet():
self.fail("AcceptEx Worker thread failed to successfully stop")
class TestFindFiles(unittest.TestCase):
def testIter(self):
dir = os.path.join(os.getcwd(), "*")
files = win32file.FindFilesW(dir)
set1 = set()
set1.update(files)
set2 = set()
for file in win32file.FindFilesIterator(dir):
set2.add(file)
assert len(set2) > 5, "This directory has less than 5 files!?"
self.failUnlessEqual(set1, set2)
def testBadDir(self):
dir = os.path.join(os.getcwd(), "a dir that doesnt exist", "*")
self.assertRaises(win32file.error, win32file.FindFilesIterator, dir)
def testEmptySpec(self):
spec = os.path.join(os.getcwd(), "*.foo_bar")
num = 0
for i in win32file.FindFilesIterator(spec):
num += 1
self.failUnlessEqual(0, num)
def testEmptyDir(self):
test_path = os.path.join(win32api.GetTempPath(), "win32file_test_directory")
try:
# Note: previously used shutil.rmtree, but when looking for
# reference count leaks, that function showed leaks! os.rmdir
# doesn't have that problem.
os.rmdir(test_path)
except os.error:
pass
os.mkdir(test_path)
try:
num = 0
for i in win32file.FindFilesIterator(os.path.join(test_path, "*")):
num += 1
# Expecting "." and ".." only
self.failUnlessEqual(2, num)
finally:
os.rmdir(test_path)
class TestDirectoryChanges(unittest.TestCase):
num_test_dirs = 1
def setUp(self):
self.watcher_threads = []
self.watcher_thread_changes = []
self.dir_names = []
self.dir_handles = []
for i in range(self.num_test_dirs):
td = tempfile.mktemp("-test-directory-changes-%d" % i)
os.mkdir(td)
self.dir_names.append(td)
hdir = win32file.CreateFile(td,
ntsecuritycon.FILE_LIST_DIRECTORY,
win32con.FILE_SHARE_READ,
None, # security desc
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED,
None)
self.dir_handles.append(hdir)
changes = []
t = threading.Thread(target=self._watcherThreadOverlapped,
args=(td, hdir, changes))
t.start()
self.watcher_threads.append(t)
self.watcher_thread_changes.append(changes)
def _watcherThread(self, dn, dh, changes):
# A synchronous version:
# XXX - not used - I was having a whole lot of problems trying to
# get this to work. Specifically:
# * ReadDirectoryChangesW without an OVERLAPPED blocks infinitely.
# * If another thread attempts to close the handle while
# ReadDirectoryChangesW is waiting on it, the ::CloseHandle() method
# blocks (which has nothing to do with the GIL - it is correctly
# managed)
# Which ends up with no way to kill the thread!
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
while 1:
try:
print("waiting", dh)
changes = win32file.ReadDirectoryChangesW(dh,
8192,
False, #sub-tree
flags)
print("got", changes)
except:
raise
changes.extend(changes)
def _watcherThreadOverlapped(self, dn, dh, changes):
flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
buf = win32file.AllocateReadBuffer(8192)
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
while 1:
win32file.ReadDirectoryChangesW(dh,
buf,
False, #sub-tree
flags,
overlapped)
# Wait for our event, or for 5 seconds.
rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
if rc == win32event.WAIT_OBJECT_0:
# got some data! Must use GetOverlappedResult to find out
# how much is valid! 0 generally means the handle has
# been closed. Blocking is OK here, as the event has
# already been set.
nbytes = win32file.GetOverlappedResult(dh, overlapped, True)
if nbytes:
bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
changes.extend(bits)
else:
# This is "normal" exit - our 'tearDown' closes the
# handle.
# print "looks like dir handle was closed!"
return
else:
print("ERROR: Watcher thread timed-out!")
return # kill the thread!
def tearDown(self):
# be careful about raising errors at teardown!
for h in self.dir_handles:
# See comments in _watcherThread above - this appears to
# deadlock if a synchronous ReadDirectoryChangesW is waiting...
# (No such problems with an asynch ReadDirectoryChangesW)
h.Close()
for dn in self.dir_names:
try:
shutil.rmtree(dn)
except OSError:
print("FAILED to remove directory", dn)
for t in self.watcher_threads:
# closing dir handle should have killed threads!
t.join(5)
if t.isAlive():
print("FAILED to wait for thread termination")
def stablize(self):
time.sleep(0.5)
def testSimple(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "test_file")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "test_file")])
def testSmall(self):
self.stablize()
for dn in self.dir_names:
fn = os.path.join(dn, "x")
open(fn, "w").close()
self.stablize()
changes = self.watcher_thread_changes[0]
self.failUnlessEqual(changes, [(1, "x")])
class TestEncrypt(unittest.TestCase):
def testEncrypt(self):
fname = tempfile.mktemp("win32file_test")
f = open(fname, "wb")
f.write(str2bytes("hello"))
f.close()
f = None
try:
try:
win32file.EncryptFile(fname)
except win32file.error as details:
if details.winerror != winerror.ERROR_ACCESS_DENIED:
raise
print("It appears this is not NTFS - cant encrypt/decrypt")
win32file.DecryptFile(fname)
finally:
if f is not None:
f.close()
os.unlink(fname)
class TestConnect(unittest.TestCase):
def connect_thread_runner(self, expect_payload, giveup_event):
# As Windows 2000 doesn't do ConnectEx, we need to use a non-blocking
# accept, as our test connection may never come. May as well use
# AcceptEx for this...
listener = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
listener.bind(self.addr)
listener.listen(1)
# create accept socket
accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# An overlapped
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
# accept the connection.
if expect_payload:
buf_size = 1024
else:
# when we don't expect data we must be careful to only pass the
# exact number of bytes for the endpoint data...
buf_size = win32file.CalculateSocketEndPointSize(listener)
buffer = win32file.AllocateReadBuffer(buf_size)
win32file.AcceptEx(listener, accepter, buffer, overlapped)
# wait for the connection or our test to fail.
events = giveup_event, overlapped.hEvent
rc = win32event.WaitForMultipleObjects(events, False, 2000)
if rc == win32event.WAIT_TIMEOUT:
self.fail("timed out waiting for a connection")
if rc == win32event.WAIT_OBJECT_0:
# Our main thread running the test failed and will never connect.
return
# must be a connection.
nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
if expect_payload:
self.request = buffer[:nbytes]
accepter.send(str2bytes('some expected response'))
def test_connect_with_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(True, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol, str2bytes("some expected request"))
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
            raise # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
self.assertEqual(self.request, str2bytes('some expected request'))
t.join(5)
self.failIf(t.isAlive(), "worker thread didn't terminate")
def test_connect_without_payload(self):
giveup_event = win32event.CreateEvent(None, 0, 0, None)
t = threading.Thread(target=self.connect_thread_runner,
args=(False, giveup_event))
t.start()
time.sleep(0.1)
s2 = socket.socket()
ol = pywintypes.OVERLAPPED()
s2.bind(('0.0.0.0', 0)) # connectex requires the socket be bound beforehand
try:
win32file.ConnectEx(s2, self.addr, ol)
except win32file.error as exc:
win32event.SetEvent(giveup_event)
if exc.winerror == 10022: # WSAEINVAL
raise TestSkipped("ConnectEx is not available on this platform")
            raise # some error we don't expect.
win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
buff = win32file.AllocateReadBuffer(1024)
win32file.WSARecv(s2, buff, ol, 0)
length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
self.response = buff[:length]
self.assertEqual(self.response, str2bytes('some expected response'))
t.join(5)
self.failIf(t.isAlive(), "worker thread didn't terminate")
class TestTransmit(unittest.TestCase):
def test_transmit(self):
import binascii
bytes = os.urandom(1024*1024)
val = binascii.hexlify(bytes)
val_length = len(val)
f = tempfile.TemporaryFile()
f.write(val)
def runner():
s1 = socket.socket()
self.addr = ('localhost', random.randint(10000,64000))
s1.bind(self.addr)
s1.listen(1)
cli, addr = s1.accept()
buf = 1
self.request = []
while buf:
buf = cli.recv(1024*100)
self.request.append(buf)
th = threading.Thread(target=runner)
th.start()
time.sleep(0.5)
s2 = socket.socket()
s2.connect(self.addr)
length = 0
aaa = str2bytes("[AAA]")
bbb = str2bytes("[BBB]")
ccc = str2bytes("[CCC]")
ddd = str2bytes("[DDD]")
empty = str2bytes("")
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, aaa, bbb)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, empty, empty)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, None, ccc)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
ol = pywintypes.OVERLAPPED()
f.seek(0)
win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, ddd)
length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
s2.close()
th.join()
buf = str2bytes('').join(self.request)
self.assertEqual(length, len(buf))
expected = val + aaa + val + bbb + val + val + ccc + ddd + val
self.assertEqual(type(expected), type(buf))
self.assert_(expected == buf)
class TestWSAEnumNetworkEvents(unittest.TestCase):
def test_basics(self):
s = socket.socket()
e = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(s, e, 0)
self.assertEquals(win32file.WSAEnumNetworkEvents(s), {})
self.assertEquals(win32file.WSAEnumNetworkEvents(s, e), {})
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, e, 3)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, "spam")
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam", e)
self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam")
f = open("NUL")
h = win32file._get_osfhandle(f.fileno())
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, h)
self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, s, h)
try:
win32file.WSAEnumNetworkEvents(h)
except win32file.error as e:
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
try:
win32file.WSAEnumNetworkEvents(s, h)
except win32file.error as e:
# According to the docs it would seem reasonable that
# this would fail with WSAEINVAL, but it doesn't.
self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
def test_functional(self):
# This is not really a unit test, but it does exercise the code
# quite well and can serve as an example of WSAEventSelect and
# WSAEnumNetworkEvents usage.
port = socket.socket()
port.setblocking(0)
port_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(port, port_event,
win32file.FD_ACCEPT |
win32file.FD_CLOSE)
port.bind(("127.0.0.1", 0))
port.listen(10)
client = socket.socket()
client.setblocking(0)
client_event = win32event.CreateEvent(None, 0, 0, None)
win32file.WSAEventSelect(client, client_event,
win32file.FD_CONNECT |
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
err = client.connect_ex(port.getsockname())
self.assertEquals(err, win32file.WSAEWOULDBLOCK)
res = win32event.WaitForSingleObject(port_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(port, port_event)
self.assertEquals(events, {win32file.FD_ACCEPT: 0})
server, addr = port.accept()
server.setblocking(0)
server_event = win32event.CreateEvent(None, 1, 0, None)
win32file.WSAEventSelect(server, server_event,
win32file.FD_READ |
win32file.FD_WRITE |
win32file.FD_CLOSE)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CONNECT: 0,
win32file.FD_WRITE: 0})
sent = 0
data = str2bytes("x") * 16 * 1024
while sent < 16 * 1024 * 1024:
try:
sent += client.send(data)
except socket.error as e:
if e.args[0] == win32file.WSAEINTR:
continue
elif e.args[0] in (win32file.WSAEWOULDBLOCK, win32file.WSAENOBUFS):
break
else:
raise
else:
self.fail("could not find socket buffer limit")
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(server, server_event)
self.assertEquals(events, {win32file.FD_READ: 0})
received = 0
while received < sent:
try:
received += len(server.recv(16 * 1024))
except socket.error as e:
if e.args[0] in [win32file.WSAEINTR, win32file.WSAEWOULDBLOCK]:
continue
else:
raise
self.assertEquals(received, sent)
events = win32file.WSAEnumNetworkEvents(server)
self.assertEquals(events, {})
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_WRITE: 0})
client.shutdown(socket.SHUT_WR)
res = win32event.WaitForSingleObject(server_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
# strange timing issues...
for i in range(5):
events = win32file.WSAEnumNetworkEvents(server, server_event)
if events: break
win32api.Sleep(100)
else:
raise AssertionError("failed to get events")
self.assertEquals(events, {win32file.FD_CLOSE: 0})
events = win32file.WSAEnumNetworkEvents(client)
self.assertEquals(events, {})
server.close()
res = win32event.WaitForSingleObject(client_event, 1000)
self.assertEquals(res, win32event.WAIT_OBJECT_0)
events = win32file.WSAEnumNetworkEvents(client, client_event)
self.assertEquals(events, {win32file.FD_CLOSE: 0})
client.close()
events = win32file.WSAEnumNetworkEvents(port)
self.assertEquals(events, {})
if __name__ == '__main__':
testmain()
|
train.py
|
import sys
import os
import argparse
from setup.settings import hparams, preprocessing
import math
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/nmt")
from nmt import nmt
import tensorflow as tf
import colorama
from threading import Thread
from setup.custom_summary import custom_summary
colorama.init()
def train():
print('\n\n{}Training model...{}\n'.format(colorama.Fore.GREEN, colorama.Fore.RESET))
if preprocessing['epochs'] is not None:
# Load corpus size, calculate number of steps
with open('{}/corpus_size'.format(preprocessing['train_folder']), 'r') as f:
corpus_size = int(f.read())
# Load current train progress
try:
with open('{}epochs_passed'.format(hparams['out_dir']), 'r') as f:
initial_epoch = int(f.read())
        except (IOError, ValueError):  # no progress file yet, or unreadable content
initial_epoch = 0
# Iterate through epochs
for epoch, learning_rate in enumerate(preprocessing['epochs']):
# Check if model already passed that epoch
if epoch < initial_epoch:
print('{}Epoch: {}, learning rate: {} - already passed{}'.format(colorama.Fore.GREEN, epoch + 1, learning_rate, colorama.Fore.RESET))
continue
# Calculate new number of training steps - up to the end of current epoch
num_train_steps = math.ceil((epoch + 1) * corpus_size / (hparams['batch_size'] if 'batch_size' in hparams else 128))
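            # Worked example (sketch): with corpus_size=1000 and batch_size=128, one epoch is
            # ceil(1000 / 128) = 8 steps, so epoch 3 (index 2) ends at ceil(3 * 1000 / 128) = 24 steps.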
print("\n{}Epoch: {}, steps per epoch: {}, epoch ends at {} steps, learning rate: {} - training{}\n".format(
colorama.Fore.GREEN,
epoch + 1,
math.ceil(corpus_size / (hparams['batch_size'] if 'batch_size' in hparams else 128)),
num_train_steps,
learning_rate,
colorama.Fore.RESET
))
# Override hparams
hparams['num_train_steps'] = num_train_steps
hparams['learning_rate'] = learning_rate
hparams['override_loaded_hparams'] = True
            # Run each epoch's training in its own thread: the nmt run exits once it reaches
            # num_train_steps, and we want to continue with further epochs afterwards
thread = Thread(target=nmt_train)
thread.start()
thread.join()
# Save epoch progress
with open('{}epochs_passed'.format(hparams['out_dir']), 'w') as f:
f.write(str(epoch + 1))
else:
nmt_train()
print('\n\n{}Training finished{}\n'.format(colorama.Fore.GREEN, colorama.Fore.RESET))
def nmt_train():
nmt_parser = argparse.ArgumentParser()
nmt.add_arguments(nmt_parser)
nmt.FLAGS, unparsed = nmt_parser.parse_known_args(['--'+k+'='+str(v) for k,v in hparams.items()])
nmt.summary_callback = custom_summary
# Run TF with modified arguments
    tf.app.run(main=nmt.main, argv=[os.path.join(os.getcwd(), 'nmt', 'nmt', 'nmt.py')] + unparsed)
train()
|
dashboard.py
|
#!/usr/bin/env python
import Tkinter as tk
import cv2
import os
import tensorflow as tf
import align.detect_face
import facenet
import tkMessageBox
import argparse
import time
import shutil
import threading
import change_name_dialog
import rospy
import numpy as np
from scipy import misc
from Tkinter import *
from PIL import Image, ImageTk
from std_msgs.msg import String
scan_list = []
scanning = False
nrof_images = 1
def mark_faces(frame):
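    # Draw a green box and confidence label for every detection above 0.95 and, while a customer
    # scan is in progress, a progress bar near the top of the frame that grows with nrof_images.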
height, width, _ = frame.shape
faces, boxes = align_data([frame], 160, 44, pnet, rnet, onet)
if boxes is not None:
for box in boxes:
if box[4] > 0.95:
frame = cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)
frame = cv2.putText(frame, "{:.2%}".format(box[4]), (int(box[0]), int(box[1] - 10)),
cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 255, 0), 1, cv2.LINE_AA)
if scanning:
frame = cv2.rectangle(frame, (int((width / 2) - 202), 8), (int((width / 2) + 202), 32),
(0, 51, 153), 2)
frame = cv2.rectangle(frame, (int((width / 2) - 200), 10),
(int((width / 2) + ((20 * (nrof_images-1)) - 200)), 30), (102, 153, 255),
cv2.FILLED)
return frame, faces
def align_data(image_list, image_size, margin, pnet, rnet, onet):
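    # Detect faces with MTCNN (pnet/rnet/onet), crop each confident detection with a margin, skip
    # blurry crops, resize to image_size and prewhiten; returns (stacked crops, boxes) or (None, None).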
minsize = 20 # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # per-stage detection thresholds for pnet, rnet and onet
factor = 0.709 # scale factor
img_list = []
boxes = []
for x in xrange(len(image_list)):
img_size = np.asarray(image_list[x].shape)[0:2]
bounding_boxes, _ = align.detect_face.detect_face(image_list[x], minsize, pnet, rnet, onet, threshold, factor)
nrof_samples = len(bounding_boxes)
if nrof_samples > 0:
for i in xrange(nrof_samples):
if bounding_boxes[i][4] > 0.95:
det = np.squeeze(bounding_boxes[i, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = image_list[x][bb[1]:bb[3], bb[0]:bb[2], :]
                    # Skip faces that are too blurry (low variance of the Laplacian indicates blur)
if cv2.Laplacian(cropped, cv2.CV_64F).var() > 100:
aligned = misc.imresize(cropped, (image_size, image_size), interp="bilinear")
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
boxes.append(bounding_boxes[i])
if len(img_list) > 0:
images = np.stack(img_list)
return images, boxes
return None, None
def load_labels_from_folder(folder):
labels = []
for dir in os.listdir(folder):
if os.path.isdir(os.path.join(folder, dir)):
if dir != "Unknown":
labels.append(dir)
return labels
def create_network_face_detection():
gpu_memory_fraction = 0.20
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
return pnet, rnet, onet
def fill_listbox():
selection = ""
if list_box.curselection():
selection = list_box.get(list_box.curselection()[0])
list_box.delete(0, END)
for label in load_labels_from_folder(folder):
list_box.insert(END, label.replace("_", " "))
if selection != "":
for i in range(list_box.size()):
if list_box.get(i) == selection:
list_box.select_set(i)
break
root.after(10000, fill_listbox)
def show_frame():
global scan_list
_, frame = cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cv2image = cv2.resize(cv2image, (750, 500))
cv2image, scan_list = mark_faces(cv2image)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
lmain.imgtk = imgtk
lmain.configure(image=imgtk)
lmain.after(10, show_frame)
def add_customer():
name = name_entry.get()
    if name != "":
scan_face(name)
else:
tkMessageBox.showerror("Error!", "Please enter a name.")
def remove_customer():
if list_box.curselection():
result = tkMessageBox.askquestion("Delete person?",
"Are you sure you want to delete " + list_box.get(
list_box.curselection()[0]) + "?",
icon='warning')
if result == "yes":
shutil.rmtree(os.path.join(folder, list_box.get(list_box.curselection()[0]).replace(" ", "_")))
list_box.delete(list_box.curselection()[0])
time.sleep(1)
pub.publish("Train")
else:
tkMessageBox.showerror("Error!", "No client selected.")
def scan_face_thread(name):
global train
global nrof_images
global scanning
scanning = True
nrof_images = 1
path = os.path.join(folder, name)
while nrof_images <= 20:
if scan_list is not None:
misc.imsave(os.path.join(path, name + '_' + str('%0*d' % (4, nrof_images)) + '.png'), scan_list[0])
nrof_images += 1
time.sleep(0.5)
scanning = False
tkMessageBox.showinfo("Scan done", "Scanning of customer face is done!")
pub.publish("Train")
def scan_face(name):
global scanning
scanning = False
while not scanning:
if scan_list is not None:
list_box.insert(END, name)
list_box.select_set(END)
name_entry.delete(0, "end")
tkMessageBox.showinfo("Starting scan.", "Starting face scan. This will take approximately 10 seconds."
"\nDuring the scan rotate your head slightly to the left and right."
"\nPress OK to continue.")
os.mkdir(os.path.join(folder, name.replace(" ", "_")))
            threading.Thread(target=scan_face_thread, args=(name.replace(' ', '_'), )).start()
            return  # the worker thread flags `scanning`; don't re-enter the loop and re-create the folder
        else:
            tkMessageBox.showerror("Error!", "There is no face in the current frame."
                                             "\nPress OK and try again.")
            return  # scan_list is only refreshed by the Tk mainloop, so retrying here would spin forever
def show_dialog():
if list_box.curselection():
inputDialog = change_name_dialog.MyDialog(root, folder, list_box.get(list_box.curselection()[0]))
root.wait_window(inputDialog.top)
fill_listbox()
else:
tkMessageBox.showerror("Error!", "No client selected.")
if __name__ == "__main__":
rospy.init_node("customer_dashboard")
pub = rospy.Publisher("train_command", String, queue_size=1)
parser = argparse.ArgumentParser()
parser.add_argument('data_dir', type=str,
help='Path to the data directory containing classifier data.')
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=0.20)
args = parser.parse_args()
folder = args.data_dir
print("Loading face detection model")
pnet, rnet, onet = create_network_face_detection()
width, height = 1920, 1080
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
root = tk.Tk()
root.title("Customer dashboard")
#root.wm_iconbitmap("@/home/maarten/Pictures/icon2.xbm")
root.resizable(width=False, height=False)
root.geometry("{}x{}".format(1000, 500))
root.bind("<Escape>", lambda e: root.quit())
left_container = Frame(root, bg="darkgrey", width=300, height=500)
center_container = Frame(root, bg="darkgrey", width=700, height=500)
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(1, weight=1)
left_container.grid(row=0, column=0, sticky="nsew")
center_container.grid(row=0, column=1, sticky="nsew")
left_container.grid_rowconfigure(5, weight=height)
left_container.grid_columnconfigure(1, weight=width)
list_box = Listbox(left_container, height=20)
fill_listbox()
list_box.grid(row=2, columnspan=2, padx=10, pady=10, sticky="nsew")
remove_button = Button(left_container, text="Remove selected client", command=remove_customer)
name_label = Label(left_container, text="Name:")
name_entry = Entry(left_container)
add_button = Button(left_container, text="Add new client", command=add_customer)
change_button = Button(left_container, text="Change selected client name.", command=show_dialog)
remove_button.grid(row=3, columnspan=2, padx=10, sticky="nsew")
name_label.grid(row=0, column=0, padx=(10, 5), pady=10, sticky="nsew")
name_entry.grid(row=0, column=1, padx=(5, 10), pady=10, sticky="ew")
add_button.grid(row=1, columnspan=2, padx=10, sticky="nsew")
change_button.grid(row=4, columnspan=2, padx=10, pady=10, sticky="nsew")
lmain = Label(center_container, bg="darkgrey")
lmain.pack(fill=BOTH, expand=True, padx=10, pady=10)
show_frame()
root.mainloop()
|
zconfig.py
|
#!/usr/bin/python -OO
# Copyright 2008-2017 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.zconfig - bonjour/zeroconfig support
"""
import os
import logging
import cherrypy
_HOST_PORT = (None, None)
try:
from sabnzbd.utils import pybonjour
from threading import Thread
_HAVE_BONJOUR = True
except:
_HAVE_BONJOUR = False
import sabnzbd
import sabnzbd.cfg as cfg
from sabnzbd.misc import match_str
_BONJOUR_OBJECT = None
def hostname():
""" Return host's pretty name """
if sabnzbd.WIN32:
return os.environ.get('computername', 'unknown')
try:
return os.uname()[1]
except:
return 'unknown'
def _zeroconf_callback(sdRef, flags, errorCode, name, regtype, domain):
logging.debug('Full Bonjour-callback sdRef=%s, flags=%s, errorCode=%s, name=%s, regtype=%s, domain=%s',
sdRef, flags, errorCode, name, regtype, domain)
if errorCode == pybonjour.kDNSServiceErr_NoError:
logging.info('Registered in Bonjour as "%s" (%s)', name, domain)
def set_bonjour(host=None, port=None):
""" Publish host/port combo through Bonjour """
global _HOST_PORT, _BONJOUR_OBJECT
if not _HAVE_BONJOUR or not cfg.enable_bonjour():
logging.info('No Bonjour/ZeroConfig support installed')
return
if host is None and port is None:
host, port = _HOST_PORT
else:
_HOST_PORT = (host, port)
scope = pybonjour.kDNSServiceInterfaceIndexAny
zhost = None
domain = None
if match_str(host, ('localhost', '127.0.', '::1')):
logging.info('Bonjour/ZeroConfig does not support "localhost"')
# All implementations fail to implement "localhost" properly
# A false address is published even when scope==kDNSServiceInterfaceIndexLocalOnly
return
name = hostname()
if '.local' in name:
suffix = ''
else:
suffix = '.local'
if hasattr(cherrypy.wsgiserver, 'redirect_url'):
cherrypy.wsgiserver.redirect_url("https://%s%s:%s/sabnzbd" % (name, suffix, port))
logging.debug('Try to publish in Bonjour as "%s" (%s:%s)', name, host, port)
try:
refObject = pybonjour.DNSServiceRegister(
interfaceIndex=scope,
name='SABnzbd on %s:%s' % (name, port),
regtype='_http._tcp',
domain=domain,
host=zhost,
port=int(port),
txtRecord=pybonjour.TXTRecord({'path': '/sabnzbd/'}),
callBack=_zeroconf_callback)
except sabnzbd.utils.pybonjour.BonjourError:
_BONJOUR_OBJECT = None
logging.debug('Failed to start Bonjour service')
else:
        # Start the processing thread; without .start() the Bonjour server loop never runs
        Thread(target=_bonjour_server, args=(refObject,)).start()
_BONJOUR_OBJECT = refObject
logging.debug('Successfully started Bonjour service')
def _bonjour_server(refObject):
while 1:
pybonjour.DNSServiceProcessResult(refObject)
logging.debug('GOT A BONJOUR CALL')
def remove_server():
""" Remove Bonjour registration """
global _BONJOUR_OBJECT
if _BONJOUR_OBJECT:
_BONJOUR_OBJECT.close()
_BONJOUR_OBJECT = None
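# --- Illustrative usage sketch (editor's addition, not part of SABnzbd) ---
# How server start-up/shutdown code might drive this module; the host and port
# values below are hypothetical examples.
#
#     from sabnzbd import zconfig
#
#     zconfig.set_bonjour('myhost.lan', 8080)   # publish the web UI once at start-up
#     zconfig.set_bonjour()                     # later: re-publish with the remembered host/port
#     zconfig.remove_server()                   # unregister on shutdown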
|
usbcameradriver.py
|
# -*- coding: utf-8 -*-
'''
Created on 24. Sep. 2015
'''
__version__ = '0.0.4'
__author__ = "Dietmar Millinger"
import sys
sys.path.insert(1, '../')
import os
from drivers.driver import *
import time
from datetime import datetime
import threading
from PIL import Image, ImageChops
import math
import numpy as np
from io import BytesIO
stillFolderRoot= '/media/usb'
stillFolder= stillFolderRoot + '/images'
stillImagePeriodMillis = (60*60*1000)
initialStillImagePeriodMillis = (30*1000)
tempStillImageFile= '/tmp/still.jpg'
tempEntropyImageFile= '/tmp/entropy.jpg'
# os.system('fswebcam -r 320x240 -S 3 --jpeg 50 --save /home/pi/to_transmit/%H%M%S.jpg')
class UsbCameraDriver(Driver):
'''
driver for still images via pi internal camera
'''
def __init__(self, parameters, logger ):
'''
Constructor
'''
Driver.__init__(self, parameters, logger )
self.max_entropy= 0
self.debug_mode= False
# SETUP folder for still images
self.ensureStillFolder()
self.sensor_worker= threading.Thread(target=self.run_sensor_worker)
self.sensor_worker.setDaemon(True)
self.sensor_worker.start()
def get_observations(self,container):
entropy= self.max_entropy
self.max_entropy= 0
change_observation= self.observation( 'camera_entropy', self.get_observation_time(), str("{:.1f}".format( entropy )), 'e' );
container.append(change_observation)
self.handle_debug ('driver ' + self.name + ' delivers ' + str(change_observation ) )
def image_entropy(self,img):
w,h = img.size
a = np.array(img.convert('RGB')).reshape((w*h,3))
h,e = np.histogramdd(a, bins=(16,)*3, range=((0,256),)*3)
prob = h/np.sum(h) # normalize
prob = prob[prob>0] # remove zeros
return -np.sum(prob*np.log2(prob))
def ensureStillFolder(self):
try:
os.makedirs(stillFolder, exist_ok=True)
except Exception as e:
pass
def isStillFolderAvailable(self):
try:
if not os.path.isdir(stillFolderRoot):
return False
if not os.path.isdir(stillFolder):
return False
return True
except Exception as e:
pass
return False
def makeStillFileName(self):
timestamp= self.get_observation_time_millis()
return "%s/%d_img.jpg" % (stillFolder,timestamp)
def run_sensor_worker (self):
self.nextStillImageMillis= self.get_observation_time_millis() + initialStillImagePeriodMillis
time.sleep( 5 )
self.last_image = None
self.still_image = None
self.handle_info ('starting camera thread')
while self.shall_run:
try:
#
# ENTROPY PART
#
# make entropy image
os.system('fswebcam -q -r 640x480 -S 3 --jpeg 90 --no-banner --save ' + tempEntropyImageFile + ' >/dev/null 2>&1' )
self.new_image = Image.open(tempEntropyImageFile)
if self.new_image and self.last_image:
diff_image = ImageChops.difference(self.new_image,self.last_image)
entropy= self.image_entropy(diff_image)
if entropy > self.max_entropy:
self.max_entropy= entropy
self.handle_debug ('found entropy {:.1f}'.format(entropy) )
self.last_image= self.new_image
#
# STILL IMAGE PART
#
hour= datetime.now().hour
if self.get_observation_time_millis() > self.nextStillImageMillis and hour > 5 and hour < 20:
self.handle_info ('starting still image at hour ' + str(hour) )
if self.isStillFolderAvailable():
filename= self.makeStillFileName()
result= os.system('fswebcam -q -r 1640x922 -S 3 --jpeg 90 --no-banner --save ' + filename + ' >/dev/null 2>&1' )
try:
value = int(result)
except ValueError:
value = 0
if value:
self.increment_hardware_error_count()
self.handle_info ('fswebcam error ' + str(result) )
else:
self.clear_hardware_error_count()
self.nextStillImageMillis= self.get_observation_time_millis() + stillImagePeriodMillis
else:
self.handle_debug ('did not find still image folder' )
except Exception as e:
time.sleep( 10 )
self.handle_debug ('exception ' + str(e) )
pass
time.sleep( 20 )
if __name__ == '__main__':
    # Driver.__init__ expects (parameters, logger); None is passed here as a stand-in logger for this ad-hoc test
    sensor = UsbCameraDriver("""{"width":1640,"height":"922"}""", None)
for count in range(0,1000):
container= []
sensor.get_observations(container)
print ( container )
time.sleep(10)
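# --- Illustrative sketch (editor's addition, not part of this driver) ---
# The frame-difference entropy computed by run_sensor_worker(), reduced to a
# self-contained function. The two file names are hypothetical placeholders.
#
#     from PIL import Image, ImageChops
#     import numpy as np
#
#     def difference_entropy(path_a, path_b):
#         diff = ImageChops.difference(Image.open(path_a), Image.open(path_b))
#         w, h = diff.size
#         px = np.array(diff.convert('RGB')).reshape((w * h, 3))
#         hist, _ = np.histogramdd(px, bins=(16,) * 3, range=((0, 256),) * 3)
#         prob = hist / np.sum(hist)            # normalise to probabilities
#         prob = prob[prob > 0]                 # drop empty bins
#         return -np.sum(prob * np.log2(prob))  # Shannon entropy in bits
#
#     print(difference_entropy('frame1.jpg', 'frame2.jpg'))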
|
test_gateway.py
|
import functools
import time
from threading import Thread
import numpy as np
import pytest
import requests
from jina.enums import CompressAlgo
from jina.executors.encoders import BaseEncoder
from jina.flow import Flow
from tests import random_docs
concurrency = 10
class DummyEncoder(BaseEncoder):
def encode(self, data, *args, **kwargs):
pass
@pytest.mark.parametrize('compress_algo', list(CompressAlgo))
def test_compression(compress_algo):
print(str(compress_algo))
f = Flow(compress=str(compress_algo)).add(name='DummyEncoder', parallel=2)
with f:
f.index(random_docs(10))
@pytest.mark.skip('this test hangs up for unknown reason on github, works on local')
def test_rest_gateway_concurrency():
def _request(status_codes, durations, index):
resp = requests.post(
f'http://0.0.0.0:{f.port_expose}/api/index',
json={
'data': [
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC']})
durations[index] = resp.elapsed.total_seconds()
status_codes[index] = resp.status_code
f = Flow(rest_api=True).add(parallel=2)
with f:
concurrency = 50
threads = []
status_codes = [None] * concurrency
durations = [None] * concurrency
for i in range(concurrency):
t = Thread(target=_request, args=(status_codes, durations, i))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
success = status_codes.count(200)
failed = len(status_codes) - success
print(
f'\nmin roundtrip time: {np.min(durations)}\n',
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
# TODO (Deepankar): change this to a Process rather than Thread & test
@pytest.mark.skip('raw grpc gateway is not stable enough under high concurrency')
def test_grpc_gateway_concurrency():
def _input_fn():
return iter([
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC'])
def _validate(req, start, status_codes, durations, index):
end = time.time()
durations[index] = (end - start)
status_codes[index] = req.status.code
def _request(f, status_codes, durations, index):
start = time.time()
f.index(
input_fn=_input_fn,
output_fn=functools.partial(
_validate,
start=start,
status_codes=status_codes,
durations=durations,
index=index
))
f = Flow().add(parallel=2)
with f:
threads = []
status_codes = [None] * concurrency
durations = [None] * concurrency
for i in range(concurrency):
t = Thread(
target=_request, args=(
f, status_codes, durations, i))
threads.append(t)
t.start()
for t in threads:
t.join()
print(f'terminate {t}')
success = status_codes.count(0)
failed = len(status_codes) - success
print(
f'\nmin roundtrip time: {np.min(durations)}\n',
f'max roundtrip time: {np.max(durations)}\n'
f'mean roundtrip time: {np.mean(durations)}\n'
)
assert success >= 1
# In some slow environments, a certain degree of failed
# requests will occur. Here we limit the degree of failed
# requests.
rate = failed / success
assert rate < 0.1
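# --- Illustrative sketch (editor's addition) ---
# The minimal Flow lifecycle the tests above rely on: build a Flow, enter its
# context to spin the pods up, then push documents through it. `random_docs` is
# the same helper imported at the top of this file.
#
#     f = Flow().add(name='DummyEncoder', parallel=2)
#     with f:                        # starts the pods, tears them down on exit
#         f.index(random_docs(10))   # send 10 random documents through the pipeline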
|
webgui02.py
|
# Another simple example of how PyMOL can be controlled using a web browser
# using Python's built-in web server capabilities
try:
import BaseHTTPServer
except ImportError:
import http.server as BaseHTTPServer
import time
import cgi
import threading
import traceback
import os, sys, re
from pymol import cmd
from chempy.sdf import SDF
# example 3D sd file
input_sdf = os.environ['PYMOL_PATH']+"/test/dat/ligs3d.sdf"
class SafeDict(dict):
'''
we will need to synchronize access if we later adopt a
multi-threaded approach
'''
pass
# global singleton class for holding server state
ServerState = SafeDict()
# we're not maintaining session tokens yet...
default_token = 0
def write_table(out, table):
out.write('<html>\n')
out.write('<header>\n')
out.write('<link rel="stylesheet" type="text/css" href="/pymol.css"></link>')
out.write('<script type="text/javascript" src="pymol.js"></script>\n')
out.write('</header>')
out.write('<body>\n')
out.write('<form action="./quit.pymol"><button type="submit">Quit</button></form>\n')
out.write('<table>\n')
header = table['header']
for heading in header:
out.write('<th>')
out.write(heading)
out.write('</th>')
body = table['body']
for row in body:
out.write('<tr>')
for col in row:
out.write('<td>')
out.write(col)
out.write('</td>')
out.write('</tr>\n')
out.write('</table>')
out.write('<iframe name="myStatus" width="500" height="60" frameborder=0>')
out.write('</iframe>\n')
out.write('<form name="hidden" target="myStatus"></form>')
out.write('</body></html>\n')
_server = None
def _shutdown(self_cmd=cmd):
global _server
if _server != None:
_server.socket.close()
self_cmd.quit()
class PymolHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def log_message(self, format, *args):
# nuke logging for the time being since it slows down PyMOL
pass
def do_css(self):
self.send_response(200)
self.send_header('Content-type','text/css')
self.end_headers()
self.wfile.write('''
table {
border-width: 1px 1px 1px 1px;
border-spacing: 1px;
border-style: outset outset outset outset;
border-color: gray gray gray gray;
border-collapse: separate;
background-color: gray;
}
table th {
border-width: 1px 1px 1px 1px;
padding: 2px 5px 2px 5px;
border-style: inset inset inset inset;
border-color: gray gray gray gray;
background-color: lightblue;
-moz-border-radius: 0px 0px 0px 0px;
}
table td {
border-width: 1px 1px 1px 1px;
padding: 2px 5px 2px 5px;
border-style: inset inset inset inset;
border-color: gray gray gray gray;
background-color: white;
-moz-border-radius: 0px 0px 0px 0px;
}
a:link {text-decoration: none; color: blue; }
a:visited {text-decoration: none; color: blue; }
a:active {text-decoration: none; color: blue; }
''')
def do_js(self):
self.send_response(200)
self.send_header('Content-type','text/javascript')
self.end_headers()
self.wfile.write('''
function load(molid)
{
// unnecessary...but keeping it around for later...
// this doesnt actually work anyway!
document.forms['hidden'].method='get';
document.forms['hidden'].action='http://localhost:8080/load.pymol?test=1&molid=' + molid;
document.forms['hidden'].submit();
}
''')
def do_pymol(self):
if "table.pymol" in self.path: # send table
session = ServerState[default_token]
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-control', 'no-cache')
self.send_header('Pragma', 'no-cache')
self.end_headers()
if 'table' not in session:
self.wfile.write("<p>No table defined.</p>\n")
else:
write_table(self.wfile, session['table'])
elif "quit.pymol" in self.path:
self.wfile.write('<html><body><p>Quitting...</p></body></html>')
self.wfile.flush()
_shutdown()
elif "load.pymol" in self.path:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-control', 'no-cache')
self.send_header('Pragma', 'no-cache')
self.end_headers()
mo = re.search("molid\=([A-Z0-9]+)",self.path)
if mo:
mol_id = mo.groups(1)[0]
session = ServerState[default_token]
mol_dict = session['data']['mol_dict']
self_cmd = session['cmd']
if mol_id in self_cmd.get_names('objects'):
if mol_id in self_cmd.get_names('objects',enabled_only=1):
self.wfile.write("<p>Disabling %s...</p>"%mol_id)
self_cmd.disable(mol_id)
else:
self.wfile.write("<p>Enabling %s...</p>"%mol_id)
self_cmd.enable(mol_id)
else:
self.wfile.write("<p>Loading %s...</p>"%mol_id)
self_cmd.read_molstr(mol_dict[mol_id], mol_id)
self_cmd.show_as("sticks",mol_id)
else:
self.wfile.write("<p>Error processing query: %s</p>"%self.path)
else: # start page
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Cache-control', 'no-cache')
self.send_header('Pragma', 'no-cache')
self.end_headers()
self.wfile.write("<p>Unhandled PyMOL request</p>")
self.wfile.flush()
def do_GET(self):
try:
doc = self.path.split('?')[0]
if doc.endswith('.pymol'): # PyMOL
try:
self.do_pymol()
except:
traceback.print_exc()
elif doc.endswith('.js'): # Javascript
self.do_js()
elif doc.endswith('.css'): # Javascript
self.do_css()
elif doc.endswith('.html'):
f = open('.'+self.path) # UNSAFE!!!
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(f.read())
f.close()
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
def do_POST(self): # not currently used
global rootnode
try:
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
if ctype == 'multipart/form-data':
query=cgi.parse_multipart(self.rfile, pdict)
self.send_response(301)
self.end_headers()
upfilecontent = query.get('upfile')
print("filecontent", upfilecontent[0])
self.wfile.write('<HTML>POST OK.<BR><BR>');
self.wfile.write(upfilecontent[0]);
except :
pass
def table_from_data(data):
# pull MOLID to the far left
col_id_list = ['MOLID'] + [x for x in data['col_id_list'] if x!='MOLID']
content = data['content']
# create the header fields
header = []
for col_id in col_id_list:
header.append(col_id)
# create the body
body = []
for row_id in data['row_id_list']:
row = []
for col_id in col_id_list:
if col_id == 'MOLID':
text = content.get( (row_id,col_id),'')
row.append('<a target="myStatus" href="load.pymol?molid=%s">'%text +
text + '</a>')
else:
row.append( content.get( (row_id,col_id),'' ))
body.append(row)
return {
'header' : header,
'body' : body,
}
def data_from_sdf(sdf_file_path):
mol_dict = {}
row_id_list = []
row_id_dict = {}
col_id_list = []
col_id_dict = {}
# first pass, load the identifiers, MOL files, and tag names
col = 0
sdf = SDF(sdf_file_path)
while 1:
rec = sdf.read()
if not rec:
break
mol = rec.get('MOL')
# get the unique identifier
mol_id = mol[0].strip()
# store the MOL record
        mol_dict[mol_id] = ''.join(mol)  # the 'string' module was never imported; use str.join instead
# add row (assuming mol_id is unique)
row_id_list.append(mol_id)
row_id_dict[mol_id] = None
# add column (if new)
for key in rec.kees:
if key != 'MOL':
if key not in col_id_dict:
col_id_list.append(key)
col_id_dict[key] = None
# second pass, read the actual data into the table structure
content = {}
sdf = SDF(sdf_file_path)
while 1:
rec = sdf.read()
if not rec:
break
mol_id = rec.get('MOL')[0].strip()
for key in rec.kees:
if key != 'MOL':
content[ (mol_id,key) ] = rec.get(key)[0].strip()
return {
'content' : content,
'row_id_list' : row_id_list,
'col_id_list' : col_id_list,
'mol_dict' : mol_dict
}
def open_browser():
import webbrowser
time.sleep(1)
webbrowser.open('http://localhost:8080/table.pymol')
# import os
# os.system('open http://localhost:8080/start.pymol')
def main():
try:
global _server
_server = BaseHTTPServer.HTTPServer(('', 8080), PymolHandler)
print('started httpserver...')
_server.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down server')
_server.socket.close()
if __name__ == '__main__':
print("this script must be run from within PyMOL")
if __name__ == 'pymol':
session = SafeDict()
session['cmd'] = cmd # could replaced by instance instead of module
session['data'] = data_from_sdf(input_sdf)
session['table'] = table_from_data(session['data'])
ServerState[default_token] = session
t = threading.Thread(target=main)
t.setDaemon(1)
t.start()
t = threading.Thread(target=open_browser)
t.setDaemon(1)
t.start()
"""
"""
|
原始版_GSLST.py
|
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import numpy as np
import tkinter as tk
import time
import cv2 as cv
import multiprocessing as mp
import jps_GSLST as J
# Note: attributes stored on self are not shared between the different threads/processes
"""
Log
1. 通过border中的点被检索后,remove该点,来减去了team的去重操作
2. 更改了JPS生成的点,现在只会把关键点放到生成路径中,而不是步长为1的所有点(JPS得到的结果可以直接拿来用了)
"""
# each node has varieties:row,col,father
class node:
def __init__(self, r=0, c=0, f=None,flag=0):
self.row = r
self.col = c
self.father = f
self.distance=0
self.flag=flag
father=self.father
while True:
if father == None:
break
self.distance+=np.sqrt((r-father.row)**2+(c-father.col)**2)
r=father.row
c=father.col
father=father.father
class rrt:
# initial the start, end, map
def __init__(self):
# initial map & window
self.height = 800
self.width = 800
# initial extend limitation and ede limitation
self.step_size = 80
self.end_lim = 50
self.start = node(50, 50, None)
self.end = node(700, 700, None)
self.col_map=np.zeros([self.height,self.width])
# node list
self.list1 = []
self.list2 = []
self.list1.append(self.start)
self.list2.append(self.end)
# initialize windows
self.window = tk.Tk()
self.window.title('rrt')
self.window.geometry('%dx%d' % (self.width, self.height+100))
self.robot_R = 5
def A_figure(self,start,end):
        # the [0] index pulls the path array out of the returned tuple
temp=self.m.get()
path = J.find_path([start.row,start.col], [end.row,end.col], temp)[0]
self.m.put(temp)
if len(path)>3:
t=0
while True:
flag=True
temp1=path[t]
temp3=path[t+2]
# check collision the second time: whether the path is in the collision!
                # sample count should use the distance between temp1 and temp3 (the original had temp1[1]-temp1[1], i.e. 0)
                col = np.linspace(temp1[1], temp3[1], int(np.sqrt((temp1[0]-temp3[0])**2+(temp1[1]-temp3[1])**2)), endpoint=True)
                row = np.linspace(temp1[0], temp3[0], int(np.sqrt((temp1[0]-temp3[0])**2+(temp1[1]-temp3[1])**2)), endpoint=True)
for j in range(min(len(col), len(row))):
                    # found while debugging: col and row had been swapped here!!
if self.col_map[int(row[j])][int(col[j])] > 100:
flag = False
if flag:
path.pop(t+1)
else:
t+=1
if temp3 == path[-1]:
break
return path
# initla window, canvas, collision and begin to extend
def init_map(self):
# initialize canvas
self.canvas = tk.Canvas(self.window, bg='white', height=self.height, width=self.width)
self.canvas.place(x=0, y=0, anchor='nw')
self.canvas.create_oval(self.start.col - 3, self.start.row - 3, self.start.col + 3, self.start.row + 3,
fill='red')
self.canvas.create_oval(self.end.col - 3, self.end.row - 3, self.end.col + 3, self.end.row + 3, fill='red')
self.add_collision()
self.canvas.update()
        # q is used to pass the "team" point groups between processes
self.q=mp.Queue()
        # m is used to pass the map (self.col_map_31_copy)
self.m=mp.Queue()
        # p1 is used to pass the raw path
self.p1=mp.Queue()
        # p2 is used to pass the path after the skip/shortcut step
self.p2=mp.Queue()
self.mp_one=mp.Process(target=self.extend)
self.mp_two=mp.Process(target=self.dilate)
self.mp_one.start()
self.mp_two.start()
# self.mp_one.join()
self.res=self.p1.get()
self.path=self.p2.get()
self.draw()
        # Three threads of control in total: the main thread only draws; the real computation runs in the two worker processes.
tk.mainloop()
# add collisions
def add_collision(self):
# map1
'''
for r in range(300,500):
for c in range(0,300):
self.col_map[r][c]=255
for c in range(310,799):
self.col_map[r][c]=255
'''
# map2
'''
for r in range(200, 350):
for c1 in range(0, 300):
self.col_map[r][c1] = 255
for c2 in range(309, 799):
self.col_map[r][c2] = 255
for r in range(450, 525):
for c1 in range(0, r - 200):
self.col_map[r][c1] = 255
for c2 in range(r - 190, 500):
self.col_map[r][c2] = 255
for r in range(525, 600):
for c1 in range(0, 325 + 525 - r):
self.col_map[r][c1] = 255
for c2 in range(335 + 525 - r, 500):
self.col_map[r][c2] = 255
for r in range(450, 525):
for c1 in range(500, r - 200 + 300):
self.col_map[r][c1] = 255
for c2 in range(r - 190 + 300, 799):
self.col_map[r][c2] = 255
for r in range(525, 600):
for c1 in range(500, 275 + 475 - r + 400):
self.col_map[r][c1] = 255
for c2 in range(285 + 475 - r + 400, 799):
self.col_map[r][c2] = 255
'''
# map3
'''
for r in range(0,799):
for c in range(300,600):
self.col_map[r][c]=255
for r in range(305,310):
for c in range(300,355):
self.col_map[r][c]=0
for c in range(405,455):
self.col_map[r][c]=0
for c in range(505,555):
self.col_map[r][c]=0
for r in range(380,385):
for c in range(350,400):
self.col_map[r][c]=0
for c in range(450,500):
self.col_map[r][c]=0
for c in range(550,600):
self.col_map[r][c]=0
for r in range(305,385):
for c in range(350,355):
self.col_map[r][c]=0
for c in range(450,455):
self.col_map[r][c]=0
for c in range(550,555):
self.col_map[r][c]=0
for c in range(400,405):
self.col_map[r][c]=0
for c in range(500,505):
self.col_map[r][c]=0
'''
# compare_map1
'''
for r in range(0,600):
for c in range(100,200):
self.col_map[r][c]=255
for c in range(480,580):
self.col_map[r][c]=255
for r in range(300,800):
for c in range(210,310):
self.col_map[r][c]=255
for c in range(590,690):
self.col_map[r][c]=255
'''
# compare_map2
'''
#start=[350,750],end=[750,750]
for r in range(100,300):
for c in range(100,300):
self.col_map[r][c]=255
for r in range(0,350):
for c in range(600,700):
self.col_map[r][c]=255
for r in range(300,560):
for c in range(490,590):
self.col_map[r][c]=255
for r in range(560,660):
for c in range(150,800):
self.col_map[r][c]=255
'''
# compare_map3
'''
#start=[500,480],end=[550,650]
for r in range(100,300):
for c in range(100,300):
self.col_map[r][c]=255
for r in range(200,600):
for c in range(500,520):
self.col_map[r][c]=255
for r in range(600,620):
for c in range(250,520):
self.col_map[r][c]=255
for r in range(300,400):
for c in range(600,800):
self.col_map[r][c]=255
'''
# compare_map4
'''
#start=[50,400], end=[750,400]
for r in range(0, 200):
for c in range(250,300):
self.col_map[r][c]=255
for r in range(0, 20):
for c in range(500,550):
self.col_map[r][c]=255
for r in range(30, 200):
for c in range(500,550):
self.col_map[r][c]=255
for r in range(200,250):
for c in range(250,550):
self.col_map[r][c]=255
for r in range(600, 800):
for c in range(250,300):
self.col_map[r][c]=255
for r in range(600, 800):
for c in range(500,550):
self.col_map[r][c]=255
for r in range(550,600):
for c in range(250,300):
self.col_map[r][c]=255
for c in range(350,550):
self.col_map[r][c]=255
'''
# compare_map5
'''
# start=[50,400], end=[750,400]
for r in range(400):
for c in range(400):
if np.sqrt(r ** 2 + c ** 2) < 300:
self.col_map[r][c] = 255
for r in range(400, 800):
for c in range(400, 800):
if np.sqrt((r - 800) ** 2 + (c - 800) ** 2) < 300:
self.col_map[r][c] = 255
for r in range(400, 800):
for c in range(400):
if np.sqrt((r - 800) ** 2 + c ** 2) < 300:
self.col_map[r][c] = 255
for r in range(400):
for c in range(400, 800):
if np.sqrt(r ** 2 + (c - 800) ** 2) < 300:
self.col_map[r][c] = 255
for r in range(100, 700):
for c in range(100, 700):
if np.sqrt((r - 400) ** 2 + (c - 400) ** 2) < 250:
self.col_map[r][c] = 255
'''
# compare_map6
'''
# start=[50,50], end=[750,750]
for r in range(300):
for c in range(100, 200):
self.col_map[r][c] = 255
for r in range(310, 600):
for c in range(100, 200):
self.col_map[r][c] = 255
for r in range(610, 800):
for c in range(100, 200):
self.col_map[r][c] = 255
for r in range(500):
for c in range(260, 360):
self.col_map[r][c] = 255
for r in range(510, 800):
for c in range(260, 360):
self.col_map[r][c] = 255
for r in range(200):
for c in range(420, 520):
self.col_map[r][c] = 255
for r in range(210, 800):
for c in range(420, 520):
self.col_map[r][c] = 255
for r in range(100):
for c in range(570, 670):
self.col_map[r][c] = 255
for r in range(110, 400):
for c in range(570, 670):
self.col_map[r][c] = 255
for r in range(410, 800):
for c in range(570, 670):
self.col_map[r][c] = 255
'''
# compare_map7
'''
#start=[50,50], end=[750,750]
for r in range(800):
for c in range(100,200):
self.col_map[r][c]=255
for r in range(300,310):
for c in range(100,150):
self.col_map[r][c] = 0
for r in range(280,290):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(320,330):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(280,330):
for c in range(140,150):
self.col_map[r][c] = 0
for r in range(600,610):
for c in range(100,150):
self.col_map[r][c] = 0
for r in range(580,590):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(620,630):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(580,630):
for c in range(140,150):
self.col_map[r][c] = 0
for r in range(500):
for c in range(260,360):
self.col_map[r][c] = 255
for r in range(510,800):
for c in range(260,360):
self.col_map[r][c] = 255
for r in range(200):
for c in range(420,520):
self.col_map[r][c] = 255
for r in range(210,800):
for c in range(420,520):
self.col_map[r][c] = 255
for r in range(100):
for c in range(570,670):
self.col_map[r][c] = 255
for r in range(110,400):
for c in range(570,670):
self.col_map[r][c] = 255
for r in range(410,800):
for c in range(570,670):
self.col_map[r][c] = 255
'''
# compare_map8
#start=[50,50], end=[750,750]
for r in range(800):
for c in range(100,200):
self.col_map[r][c]=255
for r in range(300,310):
for c in range(100,150):
self.col_map[r][c] = 0
for r in range(280,290):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(320,330):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(280,330):
for c in range(140,150):
self.col_map[r][c] = 0
for r in range(600,610):
for c in range(100,150):
self.col_map[r][c] = 0
for r in range(580,590):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(620,630):
for c in range(140,200):
self.col_map[r][c] = 0
for r in range(580,630):
for c in range(140,150):
self.col_map[r][c] = 0
for r in range(800):
for c in range(260,360):
self.col_map[r][c] = 255
for r in range(500,510):
for c in range(260,360):
self.col_map[r][c]=0
for r in range(550,560):
for c in range(260,360):
self.col_map[r][c]=0
for r in range(500,560):
for c in range(300,310):
self.col_map[r][c]=0
for r in range(800):
for c in range(420,520):
self.col_map[r][c] = 255
for i in range(420,520):
for j in range(8):
self.col_map[i][i+j] = 0
for r in range(100):
for c in range(570,670):
self.col_map[r][c] = 255
for r in range(110,400):
for c in range(570,670):
self.col_map[r][c] = 255
for r in range(410,800):
for c in range(570,670):
self.col_map[r][c] = 255
for i in range(800):
for j in range(800):
if self.col_map[i][j] == 255:
self.canvas.create_rectangle(j, i, j, i, fill='black')
# figure out the nearest node
def spring(self, flag, mk_dir_flag=1):
new_r = int(self.height * np.random.rand())
new_c = int(self.width * np.random.rand())
if flag == 2:
self.list1,self.list2=self.list2,self.list1
# "Near". find rule:only the distance
min_node = 1000000
temp_node = node()
for i in range(len(self.list1)):
temp = self.list1[i]
dis_r = temp.row - new_r
dis_c = temp.col - new_c
distance = dis_r ** 2 + dis_c ** 2
if np.sqrt(distance) < min_node and distance > 0:
temp_node = temp
min_node = distance
# "Steer" and "Edge". link nodes
distance = np.sqrt(min_node)
if distance <= self.step_size:
new_node = node(new_r, new_c, temp_node)
else:
add_row = (new_r - temp_node.row) * self.step_size / distance + temp_node.row
add_col = (new_c - temp_node.col) * self.step_size / distance + temp_node.col
new_node = node(add_row, add_col, temp_node)
# check collision
col = np.linspace(temp_node.col, new_node.col, int(self.step_size ), endpoint=True)
row = np.linspace(temp_node.row, new_node.row, int(self.step_size ), endpoint=True)
for j in range(min(len(col), len(row))):
if self.col_map[int(row[j])][int(col[j])]>100:
if flag == 2:
self.list1, self.list2 = self.list2, self.list1
return False
self.list1.append(new_node)
        # the tree grown from the end node;
        # once the first tree has been extended towards the new node, switch to the other tree and keep
        # connecting from it towards new_node until an obstacle is hit or new_node is reached (search ends)
min_node = 1000000
temp_node = node()
for i in range(len(self.list2)):
temp = self.list2[i]
dis_r = temp.row - new_node.row
dis_c = temp.col - new_node.col
distance = dis_r ** 2 + dis_c ** 2
if distance < min_node and distance > 0:
temp_node = temp
min_node = distance
# "Steer" and "Edge". link nodes
distance = np.sqrt(min_node)
if distance <= self.step_size:
new_node2 = node(new_node.row, new_node.col, temp_node)
else:
add_row = (new_node.row - temp_node.row) * self.step_size / distance + temp_node.row
add_col = (new_node.col - temp_node.col) * self.step_size / distance + temp_node.col
new_node2 = node(add_row, add_col, temp_node)
# check collision: whether the path is in the collision!
col = np.linspace(temp_node.col, new_node2.col, int(self.step_size ), endpoint=True)
row = np.linspace(temp_node.row, new_node2.row, int(self.step_size ), endpoint=True)
for j in range(min(len(col), len(row))):
if self.col_map[int(row[j])][int(col[j])]>100:
if flag == 2:
self.list1, self.list2 = self.list2, self.list1
return False
# self.canvas.create_rectangle(new_node2.col - 2, new_node2.row - 2, new_node2.col + 2, new_node2.row + 2,
# fill='green')
# self.canvas.create_line(new_node2.col, new_node2.row, temp_node.col, temp_node.row)
# self.canvas.update()
# add the new node into node list
self.list2.append(new_node2)
        # if a single step already reaches the new node, return immediately
if new_node2 == new_node:
if flag == 2:
self.list1, self.list2 = self.list2, self.list1
return True
else:
while True:
distance = np.sqrt((new_node2.col - new_node.col) ** 2 + (new_node2.row - new_node.row) ** 2)
if distance <= self.step_size:
new_node3 = node(new_node.row, new_node.col, new_node2)
else:
add_row = (new_node.row - new_node2.row) * self.step_size / distance + new_node2.row
add_col = (new_node.col - new_node2.col) * self.step_size / distance + new_node2.col
new_node3 = node(add_row, add_col, new_node2)
# check collision the second time: whether the path is in the collision!
col = np.linspace(new_node2.col, new_node3.col, int(self.step_size ), endpoint=True)
row = np.linspace(new_node2.row, new_node3.row, int(self.step_size ), endpoint=True)
for j in range(min(len(col), len(row))):
if self.col_map[int(row[j])][int(col[j])] > 100:
if flag == 2:
self.list1, self.list2 = self.list2, self.list1
return False
# self.canvas.create_rectangle(new_node3.col - 2, new_node3.row - 2, new_node3.col + 2,
# new_node3.row + 2,
# fill='green')
# self.canvas.create_line(new_node2.col, new_node2.row, new_node3.col, new_node3.row)
# self.canvas.update()
# add the new node into node list
self.list2.append(new_node3)
                # termination condition, same as above
if new_node3.row == new_node.row and new_node3.col == new_node.col:
if flag == 2:
self.list1, self.list2 = self.list2, self.list1
return True
                # replace new_node2 and iterate
new_node2 = new_node3
    # end requirement: return the two nodes that can connect the two trees with the smallest total path length
def end_limitation(self):
        # t1, t2 are the two connectable nodes
t1 = None
t2 = None
path_all_length = np.inf
        # list1 and list2 are the two trees
for temp1 in self.list1:
for temp2 in self.list2:
dis = np.inf
if (temp1.row - temp2.row) ** 2 + (temp1.col - temp2.col) ** 2 <= self.step_size ** 2:
# calculate the length of all path
temp_node = temp1
dis = 0
while True:
if temp_node == self.start:
break
dis += np.sqrt(
(temp_node.row - temp_node.father.row) ** 2 + (temp_node.col - temp_node.father.col) ** 2)
temp_node = temp_node.father
temp_node = temp2
while True:
if temp_node == self.end:
break
dis += np.sqrt(
(temp_node.row - temp_node.father.row) ** 2 + (temp_node.col - temp_node.father.col) ** 2)
temp_node = temp_node.father
dis += np.sqrt((temp1.row - temp2.row) ** 2 + (temp1.col - temp2.col) ** 2)
if dis < path_all_length:
t1 = temp1
t2 = temp2
if t1 == None:
return False
return t1, t2
    # extend nodes; flag decides whether to limit the position of the newly sampled node
def extend(self, flag=0):
        # If extend takes too long, the path most likely cannot be optimised any further (too many obstacles
        # inside the ellipse); in that case simply exit.
        # If the difference between two consecutive paths is less than 1, the search has converged.
self.go=time.time()
print('thread one start time:',self.go)
self.is_success=True
k=0
while True:
k+=1
now=time.time()
if now-self.go>10:
# draw new node and link
for i in range(len(self.list1)):
self.canvas.create_rectangle(self.list1[i].col -2, self.list1[i].row - 2, self.list1[i].col + 2, self.list1[i].row + 2,
fill='green')
if self.list1[i].father!=None:
self.canvas.create_line(self.list1[i].col, self.list1[i].row, self.list1[i].father.col, self.list1[i].father.row)
for i in range(len(self.list2)):
self.canvas.create_rectangle(self.list2[i].col -2, self.list2[i].row - 2, self.list2[i].col + 2, self.list2[i].row + 2,
fill='green')
if self.list2[i].father!=None:
self.canvas.create_line(self.list2[i].col, self.list2[i].row, self.list2[i].father.col, self.list2[i].father.row)
self.canvas.update()
print('no path')
time.sleep(5)
exit()
if self.q.qsize()!=0 and k%50==0:
self.link()
# if len(self.list1)<=len(self.list2):
if k%2==0:
is_success=self.spring(1, flag)
else:
is_success=self.spring(2, flag)
if is_success:
                print('overall: connection succeeded')
                print('overall: total number of samples:', k)
temp = self.end_limitation()
if temp != False:
self.path = self.results(temp)
                    print('overall: path length:', len(self.path))
                    print("overall: execution time:", time.time()-self.go)
# for nod in self.path:
# print("[",nod.row,nod.col,"]")
# self.draw()
break
def draw(self):
# for temp in self.list1:
# self.canvas.create_rectangle(temp.col - 2, temp.row - 2, temp.col + 2,
# temp.row + 2,
# fill='green',outline='green')
# if temp.father:
# self.canvas.create_line(temp.col, temp.row, temp.father.col, temp.father.row,fill='black',width=2)
#
# for temp in self.list2:
# self.canvas.create_rectangle(temp.col - 2, temp.row - 2, temp.col + 2,
# temp.row + 2,
# fill='green',outline='green')
# if temp.father:
# self.canvas.create_line(temp.col, temp.row, temp.father.col, temp.father.row,fill='black',width=2)
for temp in range(len(self.path)):
self.canvas.create_rectangle(self.path[temp].col - 2, self.path[temp].row - 2, self.path[temp].col + 2,
self.path[temp].row + 2,
fill='red',outline='red')
if temp==len(self.path)-1:
break
self.canvas.create_line(self.path[temp].col, self.path[temp].row, self.path[temp+1].col, self.path[temp+1].row,fill='red',width=3)
self.canvas.update()
self.window2 = tk.Tk()
self.window2.title('rrt')
self.window2.geometry('%dx%d' % (self.width, self.height+100))
self.canvas2 = tk.Canvas(self.window2, bg='white', height=self.height, width=self.width)
self.canvas2.place(x=0, y=0, anchor='nw')
self.canvas2.create_oval(self.start.col - 3, self.start.row - 3, self.start.col + 3, self.start.row + 3,
fill='red')
self.canvas2.create_oval(self.end.col - 3, self.end.row - 3, self.end.col + 3, self.end.row + 3, fill='red')
for i in range(800):
for j in range(800):
if self.col_map[i][j] == 255:
self.canvas2.create_rectangle(j, i, j, i, fill='black')
# for temp in self.list1:
# self.canvas2.create_rectangle(temp.col - 2, temp.row - 2, temp.col + 2,
# temp.row + 2,
# fill='green',outline='green')
# if temp.father!=None:
# self.canvas2.create_line(temp.col, temp.row, temp.father.col, temp.father.row,width=2)
#
# for temp in self.list2:
# self.canvas2.create_rectangle(temp.col - 2, temp.row - 2, temp.col + 2,
# temp.row + 2,
# fill='green',outline='green')
# if temp.father!=None:
# self.canvas2.create_line(temp.col, temp.row, temp.father.col, temp.father.row,width=2)
for temp in range(len(self.res)):
self.canvas2.create_rectangle(self.res[temp].col - 2, self.res[temp].row - 2, self.res[temp].col + 2,
self.res[temp].row + 2,
fill='green',outline='green')
if temp==len(self.res)-1:
break
self.canvas2.create_line(self.res[temp].col, self.res[temp].row, self.res[temp+1].col, self.res[temp+1].row,fill='black',width=2)
for temp in range(len(self.path)):
self.canvas2.create_rectangle(self.path[temp].col - 2, self.path[temp].row - 2, self.path[temp].col + 2,
self.path[temp].row + 2,
fill='red',outline='red')
if temp==len(self.path)-1:
break
self.canvas2.create_line(self.path[temp].col, self.path[temp].row, self.path[temp+1].col, self.path[temp+1].row,fill='red',width=3)
self.canvas2.update()
#optimal path
def optim_path(self,path):
if len(path)>3:
t=0
while True:
flag=True
temp1=path[t]
temp3=path[t+2]
# check collision the second time: whether the path is in the collision!
                # sample count should use the distance between temp1 and temp3 (the original had temp1.row-temp1.row, i.e. 0)
                col = np.linspace(temp1.col, temp3.col, int(np.sqrt((temp1.col-temp3.col)**2+(temp1.row-temp3.row)**2)), endpoint=True)
                row = np.linspace(temp1.row, temp3.row, int(np.sqrt((temp1.col-temp3.col)**2+(temp1.row-temp3.row)**2)), endpoint=True)
for j in range(min(len(col), len(row))):
                    # found while debugging: col and row had been swapped here!!
if self.col_map[int(row[j])][int(col[j])] > 100:
flag = False
if flag:
path.pop(t+1)
else:
t+=1
if t+2 == len(path):
break
return path
    # once the trees connect, walk back to recover a relatively low-cost path
def results(self, temp_all):
# create the path list from start node to temp_all[0]
temp = temp_all[0]
res2 = []
res2.append(temp)
while temp != self.start:
temp = temp.father
res2.append(temp)
# reverse the results
res = []
l = len(res2) - 1
for i in range(len(res2)):
count = l - i
res.append(res2[count])
# create the path list from temp_all[1] to end node
temp = temp_all[1]
res.append(temp)
while temp != self.end:
temp = temp.father
res.append(temp)
num=0
while num!=len(res)-1:
if res[num].flag!=0 and res[num+1].flag!=0 and res[num].flag==res[num+1].flag:
                print('overall: time until the trees connected:', time.time()-self.go)
path=list(self.A_figure(res[num],res[num+1]))
path.pop(0)
path.pop(-1)
for i in range(len(path)):
res.insert(num+1+i,node(path[i][0],path[i][1],res[num+i]))
num+=len(path)+1
else:
num+=1
# return self.optim_path(res)
r=res.copy()
self.res=self.optim_path(r)
self.p1.put(res)
self.p2.put(self.res)
return res
def dilate(self):
self.start_time = time.time()
        print('thread two start time:', self.start_time)
map = self.col_map
kernel = np.ones((2 * self.robot_R + 1, 2 * self.robot_R + 1), np.uint8)
map = cv.dilate(src=map, kernel=kernel)
self.col_map_21 = cv.erode(src=map, kernel=kernel)
print("2,到闭运算的时间为:", time.time() - self.start_time)
self.col_map_31 = np.zeros([self.height, self.width])
for r in range(self.height):
for c in range(self.width):
if self.col_map_21[r][c]!=0 and self.col_map[r][c]==0:
self.col_map_31[r][c] = 1
self.col_map_31_copy=self.col_map_31.copy()
self.m.put(self.col_map_31_copy)
# cv.imshow('a',self.col_map_31_copy)
# cv.waitKey(0)
print("2,到绿色通路为止的时间为:", time.time() - self.start_time)
# self.canvas.update()
self.col_map_4 = np.zeros([self.height, self.width])
for r in range(1, self.height - 1):
for c in range(1, self.width - 1):
if self.col_map[r][c] == 0 and self.col_map_31[r][c] > 0:
                    # attention: the block below causes the corner cells to stay 0
if self.col_map[r - 1][c] == 0 and self.col_map_31[r - 1][c] == 0 or self.col_map[r + 1][c] == 0 and self.col_map_31[r + 1][c] == 0 or self.col_map[r][c - 1] == 0 and self.col_map_31[r][c - 1] == 0 or self.col_map[r][c + 1] == 0 and self.col_map_31[r][c + 1] == 0:
self.col_map_4[r][c] = 1
print("2,到边界生成的时间为:", time.time() - self.start_time)
# 加copy很重要
self.col_map_4_cop = self.col_map_4.copy()
#这里通过取中间点的方式,将线合并成了点
border = []
r = 0
while r < self.height:
c = 0
while c < self.height:
if self.col_map_4[r][c] > 0:
k = 1
r_n = r
while self.col_map_4[r + k][c] == 1:
self.col_map_4[r + k][c] = 0
r_n = r + k
k += 1
k = 1
c_n = c
while self.col_map_4[r][c + k] == 1:
self.col_map_4[r][c + k] = 0
c_n = c + k
k += 1
                    # take the midpoint
border.append([int(r_n / 2 + r / 2), int(c_n / 2 + c / 2)])
self.col_map_31[int(r_n / 2 + r / 2)][int(c_n / 2 + c / 2)] = 2
c = c_n
c += 1
r += 1
print(border)
print("2,到求解border时间为:", time.time() - self.start_time)
# 这里之前的时间浪费主要是在closelist,改成遍历地图之后,快了超级多
# 这里主要是给border做配对,以某个border点为起点,进行扩散(被扩展到的点,在col_map_31中的值被改成9)
for i in border:
if self.col_map_31[i[0]][i[1]] != 2:
continue
team = []
team.append([i[0], i[1]])
open_list = []
self.col_map_31[i[0]][i[1]]=9
            # expand the surrounding nodes
for j in range(-1, 2):
for k in range(-1, 2):
if j == 0 and k == 0:
continue
if self.col_map_31[i[0] + j][i[1] + k] != 0:
open_list.append([i[0] + j, i[1] + k])
while len(open_list) != 0:
node = open_list.pop(0)
self.col_map_31[node[0]][node[1]]=9
for j in range(-1, 2):
for k in range(-1, 2):
if j == 0 and k == 0:
continue
if self.col_map_31[node[0] + j][node[1] + k]==9 or [node[0] + j, node[1] + k] in open_list:
continue
                        # border points reached by the expansion are added to the team
if self.col_map_31[node[0] + j][node[1] + k] == 2:
self.col_map_31[node[0] + j][node[1] + k] = 0
team.append([node[0] + j, node[1] + k])
                            # remove this point from border
# border.remove([node[0]+j,node[1]+k])
continue
if self.col_map_31[node[0] + j][node[1] + k] == 1:
open_list.append([node[0] + j, node[1] + k])
#dead path
if len(team) == 1:
continue
print(team)
self.q.put(team)
print("2,到生成一个局部树的时间为:", time.time() - self.start_time)
def link(self):
m=0
fail=[]
while self.q.qsize()!=0:
team=self.q.get()
is_success=0
for num in range(len(team)):
# list1
new_r=team[num][0]
new_c=team[num][1]
flag1=1
flag2=1
min_node = 1000000
temp_node = node()
for i in range(len(self.list1)):
temp = self.list1[i]
dis_r = temp.row - new_r
dis_c = temp.col - new_c
distance = dis_r ** 2 + dis_c ** 2
if np.sqrt(distance) < min_node and distance > 0:
temp_node = temp
min_node = distance
# "Steer" and "Edge". link nodes
distance = np.sqrt(min_node)
new_node = node(new_r, new_c, temp_node,flag=m+1)
# check collision
col = np.linspace(temp_node.col, new_node.col, int(self.step_size), endpoint=True)
row = np.linspace(temp_node.row, new_node.row, int(self.step_size), endpoint=True)
for j in range(min(len(col), len(row))):
if self.col_map[int(row[j])][int(col[j])] > 100:
flag1=0
if flag1==1:
self.list1.append(new_node)
for i in range(len(team)):
if i==num:
continue
self.list1.append(node(team[i][0],team[i][1],new_node,flag=m+1))
#list2
if flag1==0:
min_node = 1000000
temp_node = node()
for i in range(len(self.list2)):
temp = self.list2[i]
dis_r = temp.row - new_r
dis_c = temp.col - new_c
distance = dis_r ** 2 + dis_c ** 2
if np.sqrt(distance) < min_node and distance > 0:
temp_node = temp
min_node = distance
# "Steer" and "Edge". link nodes
distance = np.sqrt(min_node)
new_node = node(new_r, new_c, temp_node,flag=m+1)
# check collision
col = np.linspace(temp_node.col, new_node.col, int(self.step_size), endpoint=True)
row = np.linspace(temp_node.row, new_node.row, int(self.step_size), endpoint=True)
for j in range(min(len(col), len(row))):
if self.col_map[int(row[j])][int(col[j])] > 100:
flag2 = 0
if flag2 == 1:
self.list2.append(new_node)
for i in range(len(team)):
if i==num:
continue
self.list2.append(node(team[i][0], team[i][1], new_node,flag=m+1))
if flag1==1 or flag2==1:
is_success=1
break
if is_success==0:
fail.append(team)
m+=1
for i in fail:
self.q.put(i)
if __name__ == '__main__':
rrt_agent = rrt()
rrt_agent.init_map()
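# --- Illustrative sketch (editor's addition) ---
# The inter-process handshake used above (extend/dilate exchanging data through
# multiprocessing.Queue objects), reduced to its essentials: a worker publishes a
# result and the main process blocks on .get().
#
#     import multiprocessing as mp
#
#     def worker(q):
#         q.put([[50, 50], [700, 700]])   # e.g. a computed path
#
#     if __name__ == '__main__':
#         q = mp.Queue()
#         p = mp.Process(target=worker, args=(q,))
#         p.start()
#         print(q.get())                  # blocks until the worker publishes
#         p.join()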
|
multiprocess_window.py
|
"""
How to run a cancellable function when a new Tkinter window opens
https://stackoverflow.com/questions/37669517/how-to-run-a-cancellable-function-when-a-new-tkinter-window-opens
"""
from tkinter import ttk, messagebox, Toplevel, Tk
from tkinter.ttk import Frame, Button
import time
import multiprocessing
def foo():
for i in range(100):
print(i)
time.sleep(0.1)
class TerminatedProcess(Exception):
def __init__(self, error_str="Process was terminated"):
self.error_str = error_str
class ProcessWindow(Toplevel):
"""docstring for ProcessWindow"""
def __init__(self, parent, process):
super(ProcessWindow, self).__init__(parent)
self.parent = parent
self.process = process
terminate_button = Button(self, text="cancel", command=self.cancel)
terminate_button.grid(row=0, column=0)
self.grab_set() # so you can't push submit multiple times
def cancel(self):
self.process.terminate()
self.destroy()
raise TerminatedProcess
def launch(self):
self.process.start()
#this blocks mainloop of root
self.process.join()
self.destroy()
class MainApp(Frame):
def __init__(self, parent, *args, **kwargs):
super(MainApp, self).__init__(parent, *args, **kwargs)
self.parent = parent
self.button = Button(self, text="foo", command=self.callback)
self.button.grid(row=0, column=0)
def callback(self):
try:
proc = multiprocessing.Process(target=foo)
process_window = ProcessWindow(self, proc)
process_window.launch()
except TerminatedProcess as e:
messagebox.showinfo(title="canceled", message=e.error_str)
else:
messagebox.showinfo(message="sucessful run", title="Finished")
finally:
pass
def main():
root = Tk()
app = MainApp(root, padding=(4))
app.grid(column=0, row=0)
root.mainloop()
if __name__ == '__main__':
main()
|
test_direct.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2020 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import pytest
from threading import (
Thread,
Event,
)
from neo4j import (
Config,
PoolConfig,
WorkspaceConfig,
)
from neo4j.io import (
Bolt,
BoltPool,
IOPool
)
from neo4j.exceptions import (
ClientError,
ServiceUnavailable,
)
class FakeSocket:
def __init__(self, address):
self.address = address
def getpeername(self):
return self.address
def sendall(self, data):
return
def close(self):
return
class QuickConnection:
def __init__(self, socket):
self.socket = socket
self.address = socket.getpeername()
def reset(self):
pass
def close(self):
self.socket.close()
def closed(self):
return False
def defunct(self):
return False
def timedout(self):
return False
class FakeBoltPool(IOPool):
def __init__(self, address, *, auth=None, **config):
self.pool_config, self.workspace_config = Config.consume_chain(config, PoolConfig, WorkspaceConfig)
if config:
raise ValueError("Unexpected config keys: %s" % ", ".join(config.keys()))
def opener(addr, timeout):
return QuickConnection(FakeSocket(addr))
super().__init__(opener, self.pool_config, self.workspace_config)
self.address = address
def acquire(self, access_mode=None, timeout=None, database=None):
return self._acquire(self.address, timeout)
class BoltTestCase(TestCase):
def test_open(self):
with pytest.raises(ServiceUnavailable):
connection = Bolt.open(("localhost", 9999), auth=("test", "test"))
def test_open_timeout(self):
with pytest.raises(ServiceUnavailable):
connection = Bolt.open(("localhost", 9999), auth=("test", "test"), timeout=1)
def test_ping(self):
protocol_version = Bolt.ping(("localhost", 9999))
assert protocol_version is None
def test_ping_timeout(self):
protocol_version = Bolt.ping(("localhost", 9999), timeout=1)
assert protocol_version is None
class ConnectionPoolTestCase(TestCase):
def setUp(self):
self.pool = FakeBoltPool(("127.0.0.1", 7687))
def tearDown(self):
self.pool.close()
def assert_pool_size(self, address, expected_active, expected_inactive, pool=None):
if pool is None:
pool = self.pool
try:
connections = pool.connections[address]
except KeyError:
self.assertEqual(0, expected_active)
self.assertEqual(0, expected_inactive)
else:
self.assertEqual(expected_active, len([cx for cx in connections if cx.in_use]))
self.assertEqual(expected_inactive, len([cx for cx in connections if not cx.in_use]))
def test_can_acquire(self):
address = ("127.0.0.1", 7687)
connection = self.pool._acquire(address, timeout=3)
assert connection.address == address
self.assert_pool_size(address, 1, 0)
def test_can_acquire_twice(self):
address = ("127.0.0.1", 7687)
connection_1 = self.pool._acquire(address, timeout=3)
connection_2 = self.pool._acquire(address, timeout=3)
assert connection_1.address == address
assert connection_2.address == address
assert connection_1 is not connection_2
self.assert_pool_size(address, 2, 0)
def test_can_acquire_two_addresses(self):
address_1 = ("127.0.0.1", 7687)
address_2 = ("127.0.0.1", 7474)
connection_1 = self.pool._acquire(address_1, timeout=3)
connection_2 = self.pool._acquire(address_2, timeout=3)
assert connection_1.address == address_1
assert connection_2.address == address_2
self.assert_pool_size(address_1, 1, 0)
self.assert_pool_size(address_2, 1, 0)
def test_can_acquire_and_release(self):
address = ("127.0.0.1", 7687)
connection = self.pool._acquire(address, timeout=3)
self.assert_pool_size(address, 1, 0)
self.pool.release(connection)
self.assert_pool_size(address, 0, 1)
def test_releasing_twice(self):
address = ("127.0.0.1", 7687)
connection = self.pool._acquire(address, timeout=3)
self.pool.release(connection)
self.assert_pool_size(address, 0, 1)
self.pool.release(connection)
self.assert_pool_size(address, 0, 1)
def test_in_use_count(self):
address = ("127.0.0.1", 7687)
self.assertEqual(self.pool.in_use_connection_count(address), 0)
connection = self.pool._acquire(address, timeout=3)
self.assertEqual(self.pool.in_use_connection_count(address), 1)
self.pool.release(connection)
self.assertEqual(self.pool.in_use_connection_count(address), 0)
def test_max_conn_pool_size(self):
with FakeBoltPool((), max_connection_pool_size=1) as pool:
address = ("127.0.0.1", 7687)
pool._acquire(address, timeout=0)
self.assertEqual(pool.in_use_connection_count(address), 1)
with self.assertRaises(ClientError):
pool._acquire(address, timeout=0)
self.assertEqual(pool.in_use_connection_count(address), 1)
def test_multithread(self):
with FakeBoltPool((), max_connection_pool_size=5) as pool:
address = ("127.0.0.1", 7687)
releasing_event = Event()
# We start 10 threads to compete connections from pool with size of 5
threads = []
for i in range(10):
t = Thread(target=acquire_release_conn, args=(pool, address, releasing_event))
t.start()
threads.append(t)
# The pool size should be 5, all are in-use
self.assert_pool_size(address, 5, 0, pool)
# Now we allow thread to release connections they obtained from pool
releasing_event.set()
# wait for all threads to release connections back to pool
for t in threads:
t.join()
# The pool size is still 5, but all are free
self.assert_pool_size(address, 0, 5, pool)
def acquire_release_conn(pool, address, releasing_event):
conn = pool._acquire(address, timeout=3)
releasing_event.wait()
pool.release(conn)
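# --- Illustrative sketch (editor's addition) ---
# The Event-gated worker pattern used by test_multithread above, reduced to a
# standalone example: every worker grabs a resource, then waits on a shared Event
# before releasing, so the pool can be inspected while all connections are in use.
#
#     from threading import Thread, Event
#
#     def worker(gate, results, i):
#         results[i] = f"acquired-{i}"   # stand-in for pool._acquire(...)
#         gate.wait()                    # hold the resource until the main thread releases it
#
#     gate = Event()
#     results = [None] * 3
#     threads = [Thread(target=worker, args=(gate, results, i)) for i in range(3)]
#     for t in threads:
#         t.start()
#     gate.set()                         # let every worker release at once
#     for t in threads:
#         t.join()
#     print(results)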
|
gate_runner.py
|
# Copyright 2019 École Polytechnique Fédérale de Lausanne. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
import threading
import itertools
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.contrib.gate.protobuf import gate_runner_pb2
from . import gate as ggate
GATE_RUNNERS = "gate_runners"
class GateRunner(object):
def __init__(self, gate, enqueue_ops, cancel_op=None, close_op=None, gate_closed_exception_types=None, device=None, request_stop_on_success=False):
# TODO have more asserts here to protect the incoming types
self._gate = gate
self._enqueue_ops = enqueue_ops
assert len(enqueue_ops) > 0
# used for accounting / tracking
self._enqueue_ops_per_session = weakref.WeakKeyDictionary()
self._lock = threading.Lock()
# TODO allow these to be specified
if gate_closed_exception_types is None:
exception_types = (errors.OutOfRangeError,)
elif isinstance(gate_closed_exception_types, (list, tuple)):
exception_types = gate_closed_exception_types
else:
exception_types = (gate_closed_exception_types,)
for e in exception_types:
if not issubclass(e, errors.OpError):
raise Exception("Can't except non-{} type for gate exceptions: {}".format(errors.OpError, e))
self._gate_closed_exception_types = exception_types
assert isinstance(request_stop_on_success, bool)
self._request_stop_on_success = request_stop_on_success
self._exceptions_raised = []
if close_op is None:
self._close_op = self._gate.close(cancel_pending_enqueues=False)
else:
self._close_op = close_op
if cancel_op is None:
self._cancel_op = self._gate.close(cancel_pending_enqueues=True)
else:
self._cancel_op = cancel_op
if device is None:
all_devices = { enq_op_input.device for enq_op_input in itertools.chain.from_iterable(
enq_op.inputs[1:] for enq_op in enqueue_ops
) }
if len(all_devices) > 1:
raise Exception("Have more than 1 device for inputs. Please specify this manually for constructing this gate_runner.\nGot: {}".format(all_devices))
assert len(all_devices) == 1
device = all_devices.pop()
self._device = device
def _run(self, sess, enqueue_op, coord=None):
"""
If coord is None, then this stop coordinator is unused
:param sess:
:param enqueue_op:
:param coord:
:return:
"""
decremented = False
try:
enqueue_callable = sess.make_callable(enqueue_op)
while coord is None or not coord.should_stop():
try:
enqueue_callable()
except self._gate_closed_exception_types as e:
with self._lock:
self._enqueue_ops_per_session[sess] -= 1
decremented = True
if self._enqueue_ops_per_session[sess] == 0:
try:
sess.run(self._close_op)
except Exception:
pass
finally:
if self._request_stop_on_success:
if coord is None:
print("Requesting stop on success not possible! {name} doesn't have a coordinator".format(name=self.name))
else:
coord.request_stop()
return # to break out of the loop
except Exception as e:
if coord is not None:
coord.request_stop(e)
else:
with self._lock:
self._exceptions_raised.append(e)
raise
finally:
if not decremented:
with self._lock:
self._enqueue_ops_per_session[sess] -= 1
def _close_on_stop(self, sess, coord):
coord.wait_for_stop()
try:
with self._lock:
if len(self._exceptions_raised) == 0:
sess.run(self._close_op)
else:
sess.run(self._cancel_op)
except Exception as e:
pass # TODO log this somehow
def create_threads(self, sess, coord=None, daemon=False, start=False):
# set up and create all the create_threads stuff, as well as the
# prior stuff
with self._lock:
try:
if self._enqueue_ops_per_session[sess] > 0:
return []
except KeyError:
pass
self._enqueue_ops_per_session[sess] = len(self._enqueue_ops)
self._exceptions_raised.clear() # yes, we use python3
ret_threads = [threading.Thread(target=self._run, args=(sess, e, coord), name="{}_gate_runner_enqueuer_device_{}".format(self.name, self.device), daemon=daemon) for e in self._enqueue_ops]
if coord is not None:
ret_threads.append(threading.Thread(target=self._close_on_stop, args=(sess, coord), daemon=daemon, name="{}_gate_runner_coordinator_device_{}".format(self.name, self.device)))
for t in ret_threads:
if coord is not None:
coord.register_thread(t)
if start:
t.start()
return ret_threads
@property
def gate(self):
return self._gate
@property
def device(self):
return self._device
@property
def enqueue_ops(self):
return self._enqueue_ops
@property
def close_op(self):
return self._close_op
@property
def cancel_op(self):
return self._cancel_op
@property
def gate_closed_exception_types(self):
return self._gate_closed_exception_types
@property
def name(self):
"""The string name of the underlying Queue."""
return self._gate.name
@property
def smart_name(self):
return "{}_device_{}".format(self.name, self.device)
def to_proto(self, export_scope=None):
if (export_scope is None or self.gate.name.startswith(export_scope)):
gate_runner_def = gate_runner_pb2.GateRunnerDef()
gate_runner_def.gate_name = ops.strip_name_scope(
self.gate.name, export_scope
)
for enqueue_op in self.enqueue_ops:
gate_runner_def.enqueue_op_name.append(
ops.strip_name_scope(enqueue_op.name, export_scope)
)
gate_runner_def.close_op_name = ops.strip_name_scope(
self.close_op.name, export_scope
)
gate_runner_def.cancel_op_name = ops.strip_name_scope(
self.cancel_op.name, export_scope
)
gate_runner_def.device = self.device
gate_runner_def.gate_closed_exception_types.extend(
errors.error_code_from_exception_type(cls)
for cls in self.gate_closed_exception_types
)
gate_runner_def.request_stop_on_success = self._request_stop_on_success
return gate_runner_def
else:
return None
@staticmethod
def from_proto(gate_runner_def, import_scope=None):
assert isinstance(gate_runner_def, gate_runner_pb2.GateRunnerDef)
g = ops.get_default_graph()
gate = g.as_graph_element(
ops.prepend_name_scope(gate_runner_def.gate_name, import_scope))
enqueue_ops = [
g.as_graph_element(ops.prepend_name_scope(op, import_scope)) for op in gate_runner_def.enqueue_op_name
]
close_op = g.as_graph_element(ops.prepend_name_scope(gate_runner_def.close_op_name, import_scope))
cancel_op = g.as_graph_element(ops.prepend_name_scope(gate_runner_def.cancel_op_name, import_scope))
device = gate_runner_def.device
gate_closed_exception_types = tuple(
errors.exception_type_from_error_code(code)
for code in gate_runner_def.gate_closed_exception_types)
if len(gate_closed_exception_types) == 0:
gate_closed_exception_types = (errors.OutOfRangeError,)
request_stop_on_success = gate_runner_def.request_stop_on_success
return GateRunner(gate=gate,
device=device,
enqueue_ops=enqueue_ops,
cancel_op=cancel_op,
close_op=close_op,
gate_closed_exception_types=gate_closed_exception_types,
request_stop_on_success=request_stop_on_success)
def add_gate_runner(gate_runner, collection=GATE_RUNNERS):
ops.add_to_collection(collection, gate_runner)
def gate_runner(gate, ops, collection=GATE_RUNNERS):
add_gate_runner(
gate_runner=GateRunner(gate=gate, enqueue_ops=ops),
collection=collection
)
def start_gate_runners(sess=None, coord=None, daemon=True, start=True, collection=GATE_RUNNERS, device=None):
if sess is None:
sess = ops.get_default_session()
if not sess:
raise ValueError("Cannot start gate runners. No default session is registered, and it wasn't specified")
with sess.graph.as_default():
return list(
itertools.chain.from_iterable(
gate_runner.create_threads(sess=sess, coord=coord, daemon=daemon, start=start)
for gate_runner in ops.get_collection(collection) if device is None or gate_runner.device == device
)
)
ops.register_proto_function(GATE_RUNNERS,
proto_type=gate_runner_pb2.GateRunnerDef,
to_proto=GateRunner.to_proto,
from_proto=GateRunner.from_proto)
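# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of wiring a GateRunner into a session. `my_gate` and
# `enqueue_ops` are placeholders for objects produced by the companion gate
# module; they are assumptions, not part of this file's API.
def _example_start_gate_runners(my_gate, enqueue_ops, sess):
    """Register a GateRunner and start its enqueue threads under a Coordinator."""
    from tensorflow.python.training import coordinator
    runner = GateRunner(gate=my_gate, enqueue_ops=enqueue_ops)
    add_gate_runner(runner)
    coord = coordinator.Coordinator()
    threads = start_gate_runners(sess=sess, coord=coord, daemon=True, start=True)
    return coord, threads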
|
sync.py
|
"""
This module is about synchronizing and coordinating events among concurrent activities.
"""
from contextlib import contextmanager, ExitStack
import sys
import threading
import time
import inspect
from functools import wraps
import re
import logging
import atexit
import signal
import os
from collections import Counter
import easypy._multithreading_init # noqa
from .bunch import Bunch
from .gevent import is_module_patched
from .decorations import wrapper_decorator, parametrizeable_decorator
from .caching import locking_cache
from .exceptions import PException, TException
from .units import NEVER, MINUTE, HOUR
from .misc import Hex
from .humanize import time_duration # due to interference with jrpc
from .misc import kwargs_resilient
_logger = logging.getLogger(__name__)
_verbose_logger = logging.getLogger('%s.locks' % __name__) # logger for less important logs of RWLock.
IS_A_TTY = sys.stdout.isatty()
class TimeoutException(PException, TimeoutError):
pass
class PredicateNotSatisfied(TimeoutException):
# use this exception with 'wait', to indicate predicate is not satisfied
# and allow it to raise a more informative exception
pass
class LockLeaseExpired(TException):
template = "Lock Lease Expired - thread is holding this lock for too long"
class ProcessExiting(TException):
template = "Aborting thread - process is exiting"
class TimebombExpired(TException):
template = "Timebomb Expired - process killed itself"
exit_with_code = 234
_exiting = False # we use this to break out of lock-acquisition loops
@atexit.register
def break_locks():
global _exiting
_exiting = True
def _check_exiting():
if _exiting:
raise ProcessExiting()
class TerminationSignal(TException):
template = "Process got a termination signal: {_signal}"
class NotMainThread(TException):
template = "Binding must be invoked from main thread"
class SignalAlreadyBound(TException):
template = "Signal already bound to another signal handler(s)"
class LastErrorEmpty(TException):
template = "Signal caught, but no error to raise"
LAST_ERROR = None
REGISTERED_SIGNAL = None
class NotInitialized(TException):
template = "Signal type not initialized, must use bind_to_subthread_exceptions in the main thread"
def async_raise_in_main_thread(exc, use_concurrent_loop=True):
"""
Uses a unix signal to raise an exception to be raised in the main thread.
"""
from plumbum import local
pid = os.getpid()
if not REGISTERED_SIGNAL:
raise NotInitialized()
# sometimes the signal isn't caught by the main-thread, so we should try a few times (WEKAPP-14543)
def do_signal(raised_exc):
global LAST_ERROR
if LAST_ERROR is not raised_exc:
_logger.debug("MainThread took the exception - we're done here")
if use_concurrent_loop:
raiser.stop()
return
_logger.info("Raising %s in main thread", type(LAST_ERROR))
local.cmd.kill("-%d" % REGISTERED_SIGNAL, pid)
if use_concurrent_loop:
from .concurrency import concurrent
raiser = concurrent(do_signal, raised_exc=exc, loop=True, sleep=30, daemon=True, throw=False)
raiser.start()
else:
do_signal(exc)
if is_module_patched("threading"):
import gevent
def _rimt(exc):
_logger.info('YELLOW<<killing main thread greenlet>>')
main_thread_greenlet = threading.main_thread()._greenlet
orig_throw = main_thread_greenlet.throw
# we must override "throw" method so exception will be raised with the original traceback
def throw(*args):
if len(args) == 1:
ex = args[0]
return orig_throw(ex.__class__, ex, ex.__traceback__)
return orig_throw(*args)
main_thread_greenlet.throw = throw
gevent.kill(main_thread_greenlet, exc)
_logger.debug('exiting the thread that failed')
raise exc
else:
_rimt = async_raise_in_main_thread
# must be invoked in main thread in "geventless" runs in order for raise_in_main_thread to work
def initialize_exception_listener():
global REGISTERED_SIGNAL
if REGISTERED_SIGNAL:
# already registered
return
if threading.current_thread() is not threading.main_thread():
raise NotMainThread()
def handle_signal(sig, stack):
global LAST_ERROR
error = LAST_ERROR
LAST_ERROR = None
if error:
raise error
raise LastErrorEmpty(signal=sig)
custom_signal = signal.SIGUSR1
if signal.getsignal(custom_signal) in (signal.SIG_DFL, signal.SIG_IGN): # check if signal is already trapped
signal.signal(custom_signal, handle_signal)
REGISTERED_SIGNAL = custom_signal
else:
raise SignalAlreadyBound(signal=custom_signal)
@contextmanager
def raise_in_main_thread(exception_type=Exception):
try:
yield
except ProcessExiting:
# this exception is meant to stay within the thread
raise
except exception_type as exc:
if threading.current_thread() is threading.main_thread():
raise
exc._raised_asynchronously = True
global LAST_ERROR
if LAST_ERROR:
_logger.warning("a different error (%s) is pending - skipping", type(LAST_ERROR))
raise
LAST_ERROR = exc
_rimt(exc)
def initialize_termination_listener(sig=signal.SIGTERM, _registered=[]):
if _registered:
# already registered
return
_registered.append(True)
def handle_signal(sig, stack):
_logger.error("RED<<SIGNAL %s RECEIVED>>", sig)
raise TerminationSignal(_signal=sig)
if signal.getsignal(sig) in (signal.SIG_DFL, signal.SIG_IGN): # check if signal is already trapped
_logger.info("Listening to signal %s", sig)
signal.signal(sig, handle_signal)
else:
raise SignalAlreadyBound(signal=sig)
def kill_subprocesses():
from plumbum import local
pid = os.getpid()
return local.cmd.pkill['-HUP', '-P', pid].run(retcode=None)
def kill_this_process(graceful=False):
from plumbum import local
pid = os.getpid()
if graceful:
flag = '-HUP'
else:
flag = '-9'
local.cmd.kill(flag, pid)
class Timebomb(object):
def __init__(self, timeout, alert_interval=None, quiet=False):
self.fuse = threading.Event() # use this to cancel the timebomb
self.timeout = timeout
self.alert_interval = alert_interval
self.quiet = quiet
def __enter__(self):
return self.start()
def __exit__(self, *args):
self.cancel()
def start(self):
from .concurrency import concurrent
self.t = concurrent(self.wait_and_kill, daemon=True, threadname="Timebomb(%s)" % self.timeout)
self.t.start()
return self
def cancel(self):
self.fuse.set()
@raise_in_main_thread()
def wait_and_kill(self):
timer = Timer(expiration=self.timeout)
if not self.quiet:
_logger.info("Timebomb set - this process will YELLOW<<self-destruct>> in RED<<%r>>...", timer.remain)
while not timer.expired:
if self.alert_interval:
_logger.info("Time Elapsed: MAGENTA<<%r>>", timer.elapsed)
log_level = logging.WARNING if timer.remain < 5 * MINUTE else logging.DEBUG
_logger.log(log_level, "This process will YELLOW<<self-destruct>> in RED<<%r>>...", timer.remain)
if self.fuse.wait(min(self.alert_interval or 60, timer.remain)):
_logger.info("Timebomb cancelled")
return
_logger.warning("RED<< 💣 Timebomb Expired! 💣 >>")
with _logger.indented("Killing children"):
kill_subprocesses()
with _logger.indented("Committing suicide"):
kill_this_process(graceful=False)
raise Exception("Timebomb Expired")
def set_timebomb(timeout, alert_interval=None):
return Timebomb(timeout=timeout, alert_interval=alert_interval).start()
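# Hedged usage sketch: the timebomb kills the process unless it is cancelled in
# time; leaving the `with` block defuses it. The timeout values are illustrative.
def _example_timebomb():
    with Timebomb(timeout=5 * MINUTE, alert_interval=MINUTE, quiet=True):
        pass  # do the work that must finish before the bomb goes off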
@wrapper_decorator
def shared_contextmanager(func):
@locking_cache
def inner(*args, **kwargs):
class CtxManager():
def __init__(self):
self.count = 0
self.func_cm = contextmanager(func)
self._lock = threading.RLock()
def __enter__(self):
with self._lock:
if self.count == 0:
self.ctm = self.func_cm(*args, **kwargs)
self.obj = self.ctm.__enter__()
self.count += 1
return self.obj
def __exit__(self, *args):
with self._lock:
self.count -= 1
if self.count > 0:
return
self.ctm.__exit__(*sys.exc_info())
del self.ctm
del self.obj
return CtxManager()
return inner
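# Hedged usage sketch (illustrative only, assuming the decorators above preserve
# the wrapped function's call signature): callers using the same arguments share
# one entered context - it is opened on the first __enter__ and closed on the
# last __exit__.
def _example_shared_contextmanager():
    @shared_contextmanager
    def resource(tag):
        obj = {"tag": tag}
        yield obj

    with resource("a") as first, resource("a") as second:
        assert first is second  # both callers see the same underlying object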
class TagAlongThread(object):
def __init__(self, func, name, minimal_sleep=0, wait_for_trigger=True):
self._func = func
self.minimal_sleep = minimal_sleep
self.wait_for_trigger = wait_for_trigger
self._lock = threading.RLock()
self._iteration_trigger = threading.Event()
self._iterating = threading.Event()
self._not_iterating = threading.Event()
self._not_iterating.set()
self._last_exception = None
self._last_result = None
self._generation = 0
self.__alive = True
self._results = {}
self._thread = threading.Thread(target=self._loop, daemon=True, name=name)
self._thread.start()
def _loop(self):
while self.__alive:
if self.wait_for_trigger:
self._iteration_trigger.wait()
# Mark that we are now iterating
self._not_iterating.clear()
self._iterating.set()
self._generation += 1
try:
exc, result = None, self._func()
except Exception as e:
exc, result = e, None
self._results[self._generation] = exc, result
self._results.pop(self._generation - 3, None)
# Mark that we are no longer iterating
self._iterating.clear()
self._not_iterating.set()
time.sleep(self.minimal_sleep)
self._iteration_trigger.clear()
# Set all events so nothing will get blocked
self._iteration_trigger.set()
self._iterating.set()
self._not_iterating.set()
def __repr__(self):
return 'TagAlongThread<%s>' % (self._thread.name,)
def wait(self, current_generation=False):
assert self.__alive, '%s is dead' % self
target_generation = self._generation + (1 - current_generation)
while True:
self._iteration_trigger.set() # Signal that we want an iteration
while not self._iterating.wait(.1): # Wait until an iteration starts
# It is possible that we missed the loop and _iterating was already
# cleared. If this is the case, _not_iterating will not be set -
# and we can use it as a signal to stop waiting for iteration.
if self._not_iterating.is_set():
break
else:
self._not_iterating.wait() # Wait until it finishes
ret = self._results.get(target_generation)
if ret:
# make sure that this iteration started *after* this call was made,
# otherwise wait for the next iteration
break
# To avoid races, copy last exception and result to local variables
last_exception, last_result = ret
if last_exception:
raise last_exception
else:
return last_result
__call__ = wait
def _kill(self, wait=True):
self.__alive = False
self._iteration_trigger.set()
if wait:
self._thread.join()
def __enter__(self):
return self
def __exit__(self, *args):
self._kill(wait=True)
def __del__(self):
self._kill(wait=False)
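# Hedged usage sketch: a TagAlongThread lets many callers share the result of a
# single background invocation of `fetch_status` (an illustrative callable)
# instead of each running it themselves.
def _example_tag_along(fetch_status):
    with TagAlongThread(fetch_status, name="status-poller") as tat:
        return tat.wait()  # piggy-backs on one invocation of fetch_status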
class SynchronizationCoordinatorWrongWait(TException):
template = "Task is waiting on {this_file}:{this_line} instead of {others_file}:{others_line}"
class SynchronizationCoordinator(object):
"""
Synchronization helper for functions that run concurrently::
sync = SynchronizationCoordinator(5)
def foo(a):
sync.wait_for_everyone()
sync.collect_and_call_once(a, lambda a_values: print(a))
MultiObject(range(5)).call(foo)
When MultiObject/concurrent_map/sync runs a function with a ``_sync=SYNC``
argument, it will replace it with a proper SynchronizationCoordinator instance::
def foo(a, _sync=SYNC):
_sync.wait_for_everyone()
_sync.collect_and_call_once(a, lambda a_values: print(a))
MultiObject(range(5)).call(foo)
"""
def __init__(self, num_participants):
self.num_participants = num_participants
self._reset_barrier()
self._lock = threading.Lock()
self._call_once_collected_param = []
self._call_once_function = None
self._call_once_result = None
self._call_once_raised_exception = False
self._wait_context = None
def _reset_barrier(self):
self.barrier = threading.Barrier(self.num_participants, action=self._post_barrier_action)
def _post_barrier_action(self):
if self.num_participants != self.barrier.parties:
self._reset_barrier()
self._wait_context = None
if self._call_once_function:
call_once_function, self._call_once_function = self._call_once_function, None
collected_param, self._call_once_collected_param = self._call_once_collected_param, []
try:
self._call_once_result = call_once_function(collected_param)
self._call_once_raised_exception = False
except BaseException as e:
self._call_once_result = e
self._call_once_raised_exception = True
def wait_for_everyone(self, timeout=HOUR):
"""
Block until all threads that participate in the synchronization coordinator reach this point.
Fail if one of the threads is waiting at a different point::
def foo(a, _sync=SYNC):
sleep(a)
# Each thread will reach this point at a different time
_sync.wait_for_everyone()
# All threads will reach this point together
MultiObject(range(5)).call(foo)
"""
self._verify_waiting_on_same_line()
self.barrier.wait(timeout=timeout)
def abandon(self):
"""
Stop participating in this synchronization coordinator.
Note: when using with MultiObject/concurrent_map/asynchronous and _sync=SYNC, this
is called automatically when a thread terminates on return or on exception.
"""
with self._lock:
self.num_participants -= 1
self.barrier.wait()
def collect_and_call_once(self, param, func, *, timeout=HOUR):
"""
Call a function from one thread, with parameters collected from all threads::
def foo(a, _sync=SYNC):
result = _sync.collect_and_call_once(a, lambda a_values: set(a_values))
assert result == {0, 1, 2, 3, 4}
MultiObject(range(5)).call(foo)
"""
self._verify_waiting_on_same_line()
self._call_once_collected_param.append(param)
self._call_once_function = func # this will be set multiple times - but there is no race so that's OK
self.barrier.wait(timeout=timeout)
if self._call_once_raised_exception:
raise self._call_once_result
else:
return self._call_once_result
def _verify_waiting_on_same_line(self):
frame = inspect.currentframe().f_back.f_back
wait_context = (frame.f_code, frame.f_lineno, frame.f_lasti)
existing_wait_context = self._wait_context
if existing_wait_context is None:
with self._lock:
# Check again inside the lock, in case it was changed
existing_wait_context = self._wait_context
if existing_wait_context is None:
self._wait_context = wait_context
if existing_wait_context is not None: # could have changed inside the lock
if wait_context != existing_wait_context:
self.barrier.abort()
raise SynchronizationCoordinatorWrongWait(
this_file=wait_context[0].co_filename,
this_line=wait_context[1],
others_file=existing_wait_context[0].co_filename,
others_line=existing_wait_context[1])
def _abandon_when_done(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
if hasattr(result, '__enter__') and hasattr(result, '__exit__'):
@contextmanager
def wrapper_cm():
try:
with result as yielded_value:
self.wait_for_everyone() # see https://github.com/weka-io/easypy/issues/150
yield yielded_value
finally:
self.abandon()
return wrapper_cm()
else:
self.abandon()
return result
except:
self.abandon()
raise
return wrapper
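# Hedged usage sketch: two worker threads rendezvous at wait_for_everyone() and
# one of them aggregates both parameters via collect_and_call_once().
def _example_synchronization_coordinator():
    sync_coord = SynchronizationCoordinator(2)
    collected = []

    def worker(value):
        sync_coord.wait_for_everyone()
        collected.append(sync_coord.collect_and_call_once(value, sorted))

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(2)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return collected  # both entries are [0, 1]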
class SYNC(SynchronizationCoordinator):
"""Mimic ``SynchronizationCoordinator`` for running in single thread."""
def __init__(self):
pass
def wait_for_everyone(self):
pass
wait_for_everyone.__doc__ = SynchronizationCoordinator.wait_for_everyone.__doc__
def abandon(self):
pass
abandon.__doc__ = SynchronizationCoordinator.abandon.__doc__
def collect_and_call_once(self, param, func):
return func([param])
collect_and_call_once.__doc__ = SynchronizationCoordinator.collect_and_call_once.__doc__
SYNC = SYNC()
def _get_my_ident():
return Hex(threading.current_thread().ident)
class LoggedRLock():
"""
Like ``RLock``, but more logging friendly.
:param name: give it a name, so it's identifiable in the logs
:param log_interval: the interval between log messages
:param lease_expiration: throw an exception if the lock is held for more than this duration
"""
# we could inherit from this and support other types, but that'll require changes in the repr
LockType = threading.RLock
__slots__ = ("_lock", "_name", "_lease_expiration", "_lease_timer", "_log_interval", "_get_data")
_RE_OWNER = re.compile(r".*owner=(\d+) count=(\d+).*")
_MIN_TIME_FOR_LOGGING = 10
def __init__(self, name=None, log_interval=15, lease_expiration=NEVER):
self._lock = self.__class__.LockType()
self._name = name or '{}-{:X}'.format(self.LockType.__name__, id(self))
self._lease_expiration = lease_expiration
self._lease_timer = None
self._log_interval = log_interval
# we want to support both the gevent and builtin lock
try:
self._lock._owner
except AttributeError:
def _get_data():
return tuple(map(int, self._RE_OWNER.match(repr(self._lock)).groups()))
else:
def _get_data():
return self._lock._owner, self._lock._count
self._get_data = _get_data
def __repr__(self):
owner, count = self._get_data()
try:
owner = threading._active[owner].name
except KeyError:
pass
if owner:
return "<{}, owned by <{}>x{} for {}>".format(self._name, owner, count, self._lease_timer.elapsed)
else:
return "<{}, unowned>".format(self._name)
def _acquired(self, lease_expiration, should_log=False):
# we don't want to replace the lease timer, so as not to effectively extend the original lease
self._lease_timer = self._lease_timer or Timer(expiration=lease_expiration or self._lease_expiration)
if should_log:
_logger.debug("%s - acquired", self)
def acquire(self, blocking=True, timeout=-1, lease_expiration=None):
if not blocking:
ret = self._lock.acquire(blocking=False)
# touch it once, so we don't hit a race since it occurs outside of the lock acquisition
lease_timer = self._lease_timer
if ret:
self._acquired(lease_expiration)
elif lease_timer and lease_timer.expired:
raise LockLeaseExpired(lock=self)
return ret
# this timer implements the 'timeout' parameter
acquisition_timer = Timer(expiration=NEVER if timeout < 0 else timeout)
while not acquisition_timer.expired:
# the timeout on actually acquiring this lock is the minimum of:
# 1. the time remaining on the acquisition timer, set by the 'timeout' param
# 2. the logging interval - the minimal frequency for logging while the lock is awaited
# 3. the time remaining on the lease timer, which would raise if expired
timeout = min(acquisition_timer.remain, self._log_interval)
# touch it once, so we don't hit a race since it occurs outside of the lock acquisition
lease_timer = self._lease_timer
if lease_timer:
timeout = min(lease_timer.remain, timeout)
if self._lock.acquire(blocking=True, timeout=timeout):
self._acquired(lease_expiration, should_log=acquisition_timer.elapsed > self._MIN_TIME_FOR_LOGGING)
return True
# touch it once, so we don't hit a race since it occurs outside of the lock acquisition
lease_timer = self._lease_timer
if lease_timer and lease_timer.expired:
raise LockLeaseExpired(lock=self)
_logger.debug("%s - waiting...", self)
def release(self, *args):
_, count = self._get_data()
if count == 1:
# we're last: clear the timer before releasing the lock!
if self._lease_timer.elapsed > self._MIN_TIME_FOR_LOGGING:
_logger.debug("%s - releasing...", self)
self._lease_timer = None
self._lock.release()
__exit__ = release
__enter__ = acquire
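# Hedged usage sketch: LoggedRLock is used exactly like threading.RLock, but it
# logs while waiting and raises LockLeaseExpired if the holder keeps it past its
# lease. The parameter values below are illustrative.
def _example_logged_rlock():
    lock = LoggedRLock(name="demo-lock", log_interval=5, lease_expiration=60)
    with lock:
        pass  # critical section; the lock is re-entrant like a normal RLock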
class RWLock(object):
"""
Read-Write Lock: allows locking exclusively and non-exclusively::
rwl = RWLock()
with rwl:
# other can acquire this lock, but not exclusively
with rwl.exclusive():
# no one can acquire this lock - we are alone here
"""
def __init__(self, name=None):
self.lock = threading.RLock()
self.cond = threading.Condition(self.lock)
self.owners = Counter()
self.name = name or '{}-{:X}'.format(self.__class__.__name__, id(self.lock))
self._lease_timer = None
def __repr__(self):
owners = ", ".join(sorted(map(str, self.owners.keys())))
if not owners:
return "<{}, unowned>".format(self.name)
lease_timer = self._lease_timer # touch once to avoid races
if lease_timer:
mode = "exclusively ({})".format(lease_timer.elapsed)
else:
mode = "non-exclusively"
return "<{}, owned by <{}> {}>".format(self.name, owners, mode)
@property
def owner_count(self):
return sum(self.owners.values())
def __call__(self):
return self
def __enter__(self):
self.acquire()
return self
def acquire(self, identifier=None):
"""
Acquire the lock as a reader.
Optionally specify the identity of the reader (defaults to the current thread's identity).
"""
while not self.cond.acquire(timeout=15):
_logger.debug("%s - waiting...", self)
if not identifier:
identifier = _get_my_ident()
try:
self.owners[identifier] += 1
_verbose_logger.debug("%s - acquired (as reader)", self)
return self
finally:
self.cond.release()
def __exit__(self, *args):
self.release()
def release(self, identifier=None):
"""
Release the lock as a reader.
Optionally specify the identity of the reader (defaults to the current thread's identity).
"""
while not self.cond.acquire(timeout=15):
_logger.debug("%s - waiting...", self)
if not identifier:
identifier = _get_my_ident()
try:
if not self.owners[identifier]:
raise RuntimeError("cannot release un-acquired lock")
self.owners[identifier] -= 1
if not self.owners[identifier]:
self.owners.pop(identifier)
self.cond.notify()
_verbose_logger.debug("%s - released (as reader)", self)
finally:
self.cond.release()
@contextmanager
def exclusive(self, need_to_wait_message=None):
while not self.cond.acquire(timeout=15):
_logger.debug("%s - waiting...", self)
# wait until this thread is the sole owner of this lock
while not self.cond.wait_for(lambda: self.owner_count == self.owners[_get_my_ident()], timeout=15):
_check_exiting()
if need_to_wait_message:
_logger.info(need_to_wait_message)
need_to_wait_message = None # only print it once
_logger.debug("%s - waiting (for exclusivity)...", self)
my_ident = _get_my_ident()
self.owners[my_ident] += 1
self._lease_timer = Timer()
_verbose_logger.debug("%s - acquired (as writer)", self)
try:
yield
finally:
_verbose_logger.debug('%s - releasing (as writer)', self)
self._lease_timer = None
self.owners[my_ident] -= 1
if not self.owners[my_ident]:
self.owners.pop(my_ident) # don't inflate the soft lock keys with threads that do not own it
self.cond.notify()
self.cond.release()
SoftLock = RWLock
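# Hedged usage sketch: several threads may hold the RWLock as readers at once,
# while exclusive() blocks until the calling thread is the sole owner.
def _example_rwlock():
    rwl = RWLock(name="demo-rwlock")
    with rwl:                # shared (reader) acquisition
        pass
    with rwl.exclusive():    # exclusive (writer) acquisition
        pass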
@parametrizeable_decorator
def skip_if_locked(func=None, lock=None, default=None):
if not lock:
lock = threading.RLock()
def inner(*args, **kwargs):
if not lock.acquire(blocking=False):
_logger.debug("lock acquired - skipped %s", func)
return default
try:
return func(*args, **kwargs)
finally:
lock.release()
return inner
@parametrizeable_decorator
def with_my_lock(method, lock_attribute="_lock"):
def inner(self, *args, **kwargs):
ctx = getattr(self, lock_attribute)
if callable(ctx):
ctx = ctx()
with ctx:
return method(self, *args, **kwargs)
return inner
@parametrizeable_decorator
def synchronized(func, lock=None):
if not lock:
lock = threading.RLock()
def inner(*args, **kwargs):
with lock:
return func(*args, **kwargs)
return inner
class SynchronizedSingleton(type):
_instances = {}
@synchronized
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
@synchronized
def get_instance(cls):
return cls._instances.get(cls)
class LoggedCondition():
"""
Like Condition, but easier to use and more logging friendly
:param name: give it a name, so it's identifiable in the logs
:param log_interval: the interval between log messages
Unlike threading.Condition, .acquire() and .release() are not needed here.
Just use .wait_for() to wait for a predicate and perform the
condition-changing statements inside a .notifying_all() context::
some_flag = False
cond = LoggedCondition('some flag cond')
# Wait for condition:
cond.wait_for(lambda: some_flag, 'some flag to become True')
# Trigger the condition:
with cond.notifying_all('Setting some flag to true'):
some_flag = True
"""
ConditionType = threading.Condition
__slots__ = ("_cond", "_name", "_log_interval")
def __init__(self, name=None, log_interval=15):
self._cond = self.__class__.ConditionType()
self._name = name or '{}-{:X}'.format(self.ConditionType.__name__, id(self))
self._log_interval = log_interval
def __repr__(self):
return '<{}>'.format(self._name)
@contextmanager
def _acquired_for(self, msg, *args):
while not self._cond.acquire(timeout=self._log_interval):
_logger.debug('%s - waiting to be acquired for ' + msg, self, *args)
try:
yield
finally:
self._cond.release()
@contextmanager
def notifying_all(self, msg, *args):
"""
Acquire the condition lock for the context, and notify all waiters afterward.
:param msg: Message to print to the DEBUG log after performing the command
:param args: Format arguments for msg
Users should run the command that triggers the conditions inside this context manager.
"""
with self._acquired_for('performing a %s notifying all waiters' % msg, *args):
yield
_logger.debug('%s - performed: ' + msg, self, *args)
self._cond.notifyAll()
@contextmanager
def __wait_for_impl(self, pred, msg, *args, timeout=None):
timer = Timer(expiration=timeout)
def timeout_for_condition():
remain = timer.remain
if remain:
return min(remain, self._log_interval)
else:
return self._log_interval
with self._acquired_for('checking ' + msg, *args):
while not self._cond.wait_for(pred, timeout=timeout_for_condition()):
if timer.expired:
# NOTE: without a timeout we will never get here
if pred(): # Try one last time, to make sure the last check was not (possibly too long) before the timeout
return
raise TimeoutException('{condition} timed out after {duration} waiting for {msg}',
condition=self, msg=msg % args, duration=timer.elapsed)
_logger.debug('%s - waiting for ' + msg, self, *args)
yield
def wait_for(self, pred, msg, *args, timeout=None):
"""
Wait for a predicate. Only check it when notified.
:param msg: Message to print to the DEBUG log while waiting for the predicate
:param args: Format arguments for msg
:param timeout: Maximal time to wait
"""
with self.__wait_for_impl(pred, msg, *args, timeout=timeout):
pass
@contextmanager
def waited_for(self, pred, msg, *args, timeout=None):
"""
Wait for a predicate, keep the condition lock for the context, and notify all other waiters afterward.
:param msg: Message to print to the DEBUG log while waiting for the predicate
:param args: Format arguments for msg
:param timeout: Maximal time to wait
The code inside the context should be used for altering state other waiters are waiting for.
"""
with self.__wait_for_impl(pred, msg, *args, timeout=timeout):
yield
self._cond.notifyAll()
@property
def lock(self):
"""
Use the underlying lock without notifying the waiters.
"""
return self._cond._lock
# cache result only when predicate succeeds
class CachingPredicate():
def __init__(self, pred):
self.pred = pred
def __call__(self, *args, **kwargs):
try:
return self.result
except AttributeError:
pass
ret = self.pred(*args, **kwargs)
if ret in (False, None):
return ret
self.result = ret
return self.result
def make_multipred(preds):
preds = list(map(CachingPredicate, preds))
def pred(*args, **kwargs):
results = [pred(*args, **kwargs) for pred in preds]
if all(results):
return results
return pred
def iter_wait(
timeout, pred=None, sleep=0.5, message=None,
progressbar=True, throw=True, allow_interruption=False, caption=None,
log_interval=10 * MINUTE, log_level=logging.DEBUG):
# Calling wait() with a predicate and no message is not very informative
# (throw=False or message=False disables this behavior)
if message is False:
message = None
elif throw and pred and not message:
raise Exception(
"Function iter_wait()'s parameter `message` is required if "
"`pred` is passed",
)
if timeout is None:
msg = "Waiting indefinitely%s"
else:
msg = "Waiting%%s up to %s" % time_duration(timeout)
if message is None:
if caption:
message = "Waiting %s timed out after {duration:.1f} seconds" % (caption,)
elif pred:
message = "Waiting on predicate (%s) timed out after {duration:.1f} seconds" % (pred,)
else:
message = "Timed out after {duration:.1f} seconds"
if pred:
pred_decorator = kwargs_resilient(negligible=['is_final_attempt'])
if hasattr(pred, "__iter__"):
pred = make_multipred(map(pred_decorator, pred))
else:
pred = pred_decorator(pred)
if not caption:
caption = "on predicate (%s)" % pred
else:
pred = lambda **kwargs: False
throw = False
if caption:
msg %= " %s" % (caption,)
else:
msg %= ""
if isinstance(sleep, tuple):
data = list(sleep) # can't use nonlocal since this module is indirectly used in python2
def sleep():
cur, mx = data
try:
return cur
finally:
data[0] = min(mx, cur * 1.5)
if not IS_A_TTY:
# can't interrupt
allow_interruption = False
progressbar = False
if progressbar and threading.current_thread() is not threading.main_thread():
# prevent clutter
progressbar = False
if allow_interruption:
msg += " (hit <ESC> to continue)"
l_timer = Timer(expiration=timeout)
log_timer = Timer(expiration=log_interval)
with ExitStack() as stack:
if progressbar:
from .logging import PROGRESS_BAR
pr = stack.enter_context(PROGRESS_BAR())
pr.set_message(msg)
while True:
s_timer = Timer()
expired = l_timer.expired
last_exc = None
try:
ret = pred(is_final_attempt=bool(expired))
except PredicateNotSatisfied as _exc:
if getattr(_exc, "duration", 0):
# this exception was raised by a nested 'wait' call - don't swallow it!
raise
if log_timer.expired:
log_timer.reset()
_logger.log(log_level, 'Still waiting after %r: %s', l_timer.elapsed, _exc.message)
last_exc = _exc
ret = None
else:
if ret not in (None, False):
yield ret
return
if expired:
duration = l_timer.stop()
start_time = l_timer.start_time
if throw:
if last_exc:
last_exc.add_params(duration=duration, start_time=start_time)
raise last_exc
if callable(message):
message = message()
raise TimeoutException(message, duration=duration, start_time=start_time)
yield None
return
yield l_timer.remain
sleep_for = sleep() if callable(sleep) else sleep
if allow_interruption:
from termenu.keyboard import keyboard_listener
timer = Timer(expiration=sleep_for - s_timer.elapsed)
for key in keyboard_listener(heartbeat=0.25):
if key == "esc":
yield None
return
if key == "enter":
break
if timer.expired:
break
else:
s_timeout = max(0, sleep_for - s_timer.elapsed)
if l_timer.expiration:
s_timeout = min(l_timer.remain, s_timeout)
time.sleep(s_timeout)
@wraps(iter_wait)
def wait(*args, **kwargs):
"""
Wait until ``pred`` returns a useful value (see below), or until ``timeout`` passes.
:param timeout: how long to wait for ``pred`` to get satisfied. if ``None`` waits
indefinitely.
:param pred: callable that checks the condition to wait upon.
It can return ``None`` or ``False`` to indicate that the predicate has not been satisfied.
Any other value will end the wait and that value will be returned from the wait function.
The predicate can also raise a subclass of PredicateNotSatisfied. The exception will be raised
if the timeout expires, instead of a TimeoutException.
``pred`` can be a list of predicates, and ``wait`` will wait for all of them to be satisfied.
Note that once a predicate is satisfied, it will not be called again.
If no ``pred`` is provided, ``wait`` behaves like ``sleep``.
:param sleep: the number of seconds to sleep between calls to ``pred``.
it can be a callable, or a ``(first, max)`` tuple, in which case the sleep duration will grow
exponentially from ``first`` up to ``max``.
:param message: message to use for a TimeoutException. can be a callable. To encourage the use of
informative TimeoutException messages, the user must provide a value here. If a PredicateNotSatisfied
is used in the predicate, pass ``False``.
:param progressbar: if True, show an automatic progress bar while waiting.
:param caption: message to show in progress bar, and in TimeoutException (if ``message``
not given).
:param throw: if True, throw an exception if ``timeout`` expires.
if ``pred`` not given, this is always False.
:param allow_interruption: if True, the user can end the wait prematurely by hitting ESC.
:param log_interval: interval for printing thrown ``PredicateNotSatisfied``s to the log.
Set to ``None`` to disable this logging. If the predicate returns ``False`` instead
of throwing, this argument is ignored. Defaults to 10 minutes.
:param log_level: the log level for printing the thrown ``PredicateNotSatisfied`` with
``log_interval``. Defaults to ``logging.DEBUG``.
"""
for ret in iter_wait(*args, **kwargs):
pass
return ret
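# Hedged usage sketch: poll a flag with an exponentially growing sleep, failing
# with an informative TimeoutException after 30 seconds. `flag_holder` is an
# illustrative dict supplied by the caller.
def _example_wait(flag_holder):
    return wait(
        30,
        pred=lambda: flag_holder.get("ready"),
        message="flag did not become ready within {duration:.1f} seconds",
        sleep=(0.1, 2),
        progressbar=False,
    )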
def wait_progress(*args, **kwargs):
for _ in iter_wait_progress(*args, **kwargs):
pass
def iter_wait_progress(state_getter, advance_timeout, total_timeout=float("inf"), state_threshold=0, sleep=0.5, throw=True,
allow_regression=True, advancer_name='', progressbar=True):
if advancer_name:
advancer_name += ' '
advance_timeout_message = advancer_name + "did not advance for {duration: .1f} seconds"
total_timeout_message = advancer_name + "advanced but failed to finish in {duration: .1f} seconds"
state = state_getter() # state_getter should return a number, represent current state
progress = Bunch(state=state, finished=False, changed=False)
progress.total_timer = Timer(expiration=total_timeout)
progress.advance_timer = Timer(expiration=advance_timeout)
def did_advance():
current_state = state_getter()
progress.advanced = progress.state > current_state
progress.changed = progress.state != current_state
if progress.advanced or allow_regression:
progress.state = current_state
return progress.advanced
# We want to make sure each iteration sleeps at least once,
# since the internal 'wait' could return immediately without sleeping at all,
# and if the external while loop isn't done we could be iterating too much
min_sleep = None
while progress.state > state_threshold:
if progress.total_timer.expired:
raise TimeoutException(total_timeout_message, duration=progress.total_timer.duration)
progress.timeout, message, timer = min(
(progress.total_timer.remain, total_timeout_message, progress.total_timer),
(progress.advance_timer.remain, advance_timeout_message, progress.advance_timer))
if min_sleep:
wait(min_sleep.remain)
min_sleep = Timer(expiration=sleep)
result = wait(progress.timeout, pred=did_advance, sleep=sleep, throw=False, progressbar=progressbar)
if result:
pass
elif not throw:
# wait timed out without throwing
return
else:
raise TimeoutException(message, duration=timer.duration)
progress.advance_timer.reset()
yield progress
progress.finished = True
yield progress # indicate success
from .timing import Timer # noqa; avoid import cycle
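# Hedged usage sketch: drain a countdown-style state with wait_progress, failing
# if it stops advancing for 10 seconds or does not finish within 2 minutes.
# `get_remaining_items` is an illustrative callable returning a number.
def _example_wait_progress(get_remaining_items):
    wait_progress(
        state_getter=get_remaining_items,
        advance_timeout=10,
        total_timeout=120,
        advancer_name="item drain",
        progressbar=False,
    )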
|
hedgerbot.py
|
import logging
from .observer import Observer
import json
import time
import os
import math
import sys
import traceback
import config
from private_markets import haobtccny, brokercny
from .marketmaker import MarketMaker
import threading
class HedgerBot(MarketMaker):
exchange = 'HaobtcCNY'
hedger = 'BrokerCNY'
out_dir = 'hedger_history/'
filename = exchange + '-bot.csv'
def __init__(self):
super().__init__()
self.clients = {
"HaobtcCNY": haobtccny.PrivateHaobtcCNY(config.HAOBTC_API_KEY, config.HAOBTC_SECRET_TOKEN),
"BrokerCNY": brokercny.PrivateBrokerCNY(),
}
self.taker_fee = 0.002
self.bid_fee_rate = config.bid_fee_rate
self.ask_fee_rate = config.ask_fee_rate
self.bid_price_risk = config.bid_price_risk
self.ask_price_risk = config.ask_price_risk
self.peer_exchange = self.hedger
try:
os.mkdir(self.out_dir)
except:
pass
t = threading.Thread(target = self.msg_server)
t.start()
logging.info('HedgerBot Setup complete')
# time.sleep(2)
def process_message(self,message):
kexchange = self.exchange
try:
message = message.decode('utf-8')
message = json.loads(message)
logging.info('msg:%s', message)
type = message['type']
price = message['price']
logging.info('msg type:%s %s', type, price)
if type == 'buy':
buy_orders = self.get_orders('buy')
buy_orders.sort(key=lambda x: x['price'], reverse=True)
for buy_order in buy_orders:
if buy_order['price'] == price:
self.cancel_order(kexchange, 'buy', buy_order['id'])
break
elif type == 'sell':
sell_orders = self.get_orders('sell')
sell_orders.sort(key=lambda x: x['price'])
for sell_order in sell_orders:
if sell_order['price'] == price:
self.cancel_order(kexchange, 'sell', sell_order['id'])
break
except Exception as e:
logging.error("process message exception %s", e)
traceback.print_exc()
def market_maker(self, depths):
# super().market_maker(depths)
kexchange = self.exchange
# update price
try:
bid_price = int(depths[self.exchange]["bids"][0]['price'])
ask_price = int(depths[self.exchange]["asks"][0]['price'])
bid_amount = (depths[self.exchange]["bids"][0]['amount'])
ask_amount = (depths[self.exchange]["asks"][0]['amount'])
except Exception as ex:
logging.warn("exception haobtc depths:%s" % ex)
traceback.print_exc()
bid_price = 0
ask_price = 0
bid_amount = 0
ask_amount = 0
try:
peer_bid_price = int(depths[self.peer_exchange]["bids"][0]['price'])
peer_ask_price = int(depths[self.peer_exchange]["asks"][0]['price'])
except Exception as ex:
logging.warn("exception peer depths:%s" % ex)
traceback.print_exc()
return
if peer_bid_price == 0 or peer_ask_price == 0:
logging.warn("exception ticker")
return
if bid_price < 1:
bid_price = 100
if ask_price < 1:
ask_price = 100000
peer_bid_price = peer_bid_price*(1-self.taker_fee) - self.bid_price_risk
peer_ask_price = peer_ask_price*(1+self.taker_fee) + self.ask_price_risk
buyprice = int(peer_bid_price) - 1
sellprice = int(peer_ask_price) + 1
min_buy_price = buyprice - config.MAKER_BUY_QUEUE*config.MAKER_BUY_STAGE
max_sell_price = sellprice + config.MAKER_SELL_QUEUE*config.MAKER_SELL_STAGE
self.buyprice_spread = set(range(min_buy_price+1, buyprice+1))
self.sellprice_spread = set(range(sellprice, max_sell_price))
logging.debug("%s/%s", self.sellprice_spread, self.buyprice_spread)
self.buyprice = buyprice
self.sellprice = sellprice
# Update client balance
self.update_balance()
# query orders
if self.is_buying():
buy_orders = self.get_orders('buy')
buy_orders.sort(key=lambda x: x['price'], reverse=True)
buy_prices = [x['price'] for x in buy_orders]
logging.debug(buy_prices)
for buy_order in buy_orders:
logging.debug(buy_order)
result = self.clients[kexchange].get_order(buy_order['id'])
logging.debug (result)
if not result:
logging.warn("get_order buy #%s failed" % (buy_order['id']))
return
self.hedge_order(buy_order, result)
if result['status'] == 'CLOSE' or result['status'] == 'CANCELED':
self.remove_order(buy_order['id'])
elif (result['price'] not in self.buyprice_spread):
logging.info("cancel buyprice %s result['price'] = %s" % (self.buyprice_spread, result['price']))
self.cancel_order(kexchange, 'buy', buy_order['id'])
if self.is_selling():
sell_orders = self.get_orders('sell')
sell_orders.sort(key=lambda x: x['price'])
sell_prices = [x['price'] for x in sell_orders]
logging.debug(sell_prices)
for sell_order in self.get_orders('sell'):
logging.debug(sell_order)
result = self.clients[kexchange].get_order(sell_order['id'])
logging.debug (result)
if not result:
logging.warn("get_order sell #%s failed" % (sell_order['id']))
return
self.hedge_order(sell_order, result)
if result['status'] == 'CLOSE' or result['status'] == 'CANCELED':
self.remove_order(sell_order['id'])
elif (result['price'] not in self.sellprice_spread):
logging.info("cancel sellprice %s result['price'] = %s" % (self.sellprice_spread, result['price']))
self.cancel_order(kexchange, 'sell', sell_order['id'])
# execute maker trade
if config.MAKER_TRADE_ENABLE:
if self.buying_len() < config.MAKER_BUY_QUEUE:
self.new_order(kexchange, 'buy')
if self.selling_len() < config.MAKER_SELL_QUEUE:
self.new_order(kexchange, 'sell')
# execute taker trade
if config.TAKER_TRADE_ENABLE:
taker_buy_price = peer_bid_price*(1-self.taker_fee)
taker_sell_price = peer_ask_price*(1+self.taker_fee)
logging.debug("price [%s,%s], peer [%s,%s] taker price:[%s,%s]", ask_price, bid_price, peer_ask_price, peer_bid_price, taker_buy_price, taker_sell_price)
if ask_price!=0 and ask_price < taker_buy_price:
if ask_amount < 0.1:
ask_amount = 0.1
ask_amount+=0.01
logging.info("to taker buy %s<%s", ask_price, taker_buy_price)
self.new_order(kexchange, 'buy', maker_only=False, amount=ask_amount, price=ask_price)
return
if bid_price != 0 and bid_price > taker_sell_price:
if bid_amount < 0.1:
bid_amount = 0.1
bid_amount +=0.01
logging.info("to taker sell %s>%s", bid_price, taker_sell_price)
self.new_order(kexchange, 'sell', maker_only=False, amount= bid_amount, price=bid_price)
return
def get_sell_price(self):
sell_orders = self.get_orders('sell')
sell_prices = [x['price'] for x in sell_orders]
price_candidate_set = set(self.sellprice_spread) - set(sell_prices)
price_candidate_list = list(price_candidate_set)
# price_candidate_list.sort()
for x in price_candidate_list:
return x
# no free price level left in the spread - log diagnostics and fall back to the default
logging.error(sell_orders)
logging.error(sell_prices)
logging.error(price_candidate_set)
return super().get_sell_price()
def get_buy_price(self):
buy_orders = self.get_orders('buy')
buy_prices = [x['price'] for x in buy_orders]
price_candidate_set = set(self.buyprice_spread) - set(buy_prices)
price_candidate_list = list(price_candidate_set)
# price_candidate_list.sort(reverse=True)
for x in price_candidate_list:
return x
# no free price level left in the spread - log diagnostics and fall back to the default
logging.error(self.buyprice_spread)
logging.error(buy_orders)
logging.error(buy_prices)
logging.error(price_candidate_set)
return super().get_buy_price()
def hedge_order(self, order, result):
if result['deal_size'] <= 0:
logging.debug("[hedger]NOTHING TO BE DEALED.")
return
order_id = result['order_id']
deal_size = result['deal_size']
price = result['avg_price']
amount = deal_size - order['deal_amount']
if amount <= config.broker_min_amount:
logging.debug("[hedger]deal nothing while.")
return
maker_only = order['maker_only']
client_id = str(order_id) + '-' + str(order['deal_index'])+('' if maker_only else '-taker')
logging.info("hedge new deal: %s", result)
hedge_side = 'SELL' if result['side'] =='BUY' else 'BUY'
logging.info('hedge [%s] to broker: %s %s %s', client_id, hedge_side, amount, price)
if hedge_side == 'SELL':
self.clients[self.hedger].sell(amount, price, client_id)
else:
self.clients[self.hedger].buy(amount, price, client_id)
# update the deal_amount of local order
self.remove_order(order_id)
order['deal_amount'] = deal_size
order['deal_index'] +=1
self.orders.append(order)
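# Hedged illustration (not used by the bot): the maker price bands computed in
# market_maker above shift the peer bid/ask by the taker fee plus a risk margin,
# then spread them into a queue of integer price levels. All numbers are made up.
def _example_price_bands(peer_bid=7000, peer_ask=7010, taker_fee=0.002,
                         bid_risk=2, ask_risk=2, queue_len=3, stage=1):
    buyprice = int(peer_bid * (1 - taker_fee) - bid_risk) - 1
    sellprice = int(peer_ask * (1 + taker_fee) + ask_risk) + 1
    buy_band = set(range(buyprice - queue_len * stage + 1, buyprice + 1))
    sell_band = set(range(sellprice, sellprice + queue_len * stage))
    return buy_band, sell_band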
|
data.py
|
import os
# import cv2
import random
import tempfile
import numpy as np
from Queue import Queue
from threading import Thread
import nipy
from data_providers.base_provider import VideosDataset, DataProvider
from datasets.hdf5_loader import get_data
class Data(VideosDataset):
def __init__(self, name, train_ids, normalization, sequence_length,
crop_size, num_classes, queue_size, all_hdf5_data):
"""
Args:
name: str, name of the data (train, test or validation)
paths: list, list of string that have the video path and label
information
sequence_length: video clip length
crop_size: `tuple`, image resize size (width, height)
normalization: `str` or None
None: no any normalization
divide_255: divide all pixels by 255
divide_256: divide all pixels by 256
num_classes: `integer`, number of classes that the dataset has
queue_size: `integer`, data queue size
"""
self.name = name
self.train_ids = train_ids
self.normalization = normalization
self.sequence_length = sequence_length # slices
self.crop_size = crop_size
self.num_classes = num_classes
self.queue = DataQueue(name, queue_size)
self.examples = None
self.all_hdf5_data = all_hdf5_data
self._start_data_thread()
def get_frames_data(self, filename, num_frames_per_clip=16):
''' Given a directory containing extracted frames, return a video clip of
(num_frames_per_clip) consecutive frames as a list of np arrays
Args
num_frames_per_clip: sequence_length of the video clip(slices)
Returns
mri: numpy, video clip with shape
[sequence_length, height, width, channels]
------- legacy frame-directory implementation below (kept for reference; requires cv2) -------
video = []
s_index = 0
for parent, dirnames, files in os.walk(filename):
filenames = [ fi for fi in files if fi.endswith((".nii", ".jpg", "jpeg")) ]
if(len(filenames) < num_frames_per_clip):
return None
suffix = filenames[0].split('.', 1)[1]
filenames_int = [i.split('.', 1)[0] for i in filenames]
filenames_int = sorted(filenames_int)
s_index = random.randint(0, len(filenames) - num_frames_per_clip)
for i in range(s_index, s_index + num_frames_per_clip):
image_name = str(filename) + '/' + str(filenames_int[i]) + '.' + suffix
# print image_name
img = cv2.imread(image_name)
img = cv2.resize(img, self.crop_size)
if self.normalization:
img_data = self.normalize_image(img, self.normalization)
video.append(img_data)
return video
'''
mri = nipy.load_image(filename).get_data().transpose((1, 0, 2))
data = np.zeros((num_frames_per_clip, mri.shape[1], mri.shape[2], 1), dtype='float32')
mx = mri.max(axis=0).max(axis=0).max(axis=0)
mri = np.array(mri) / mx
if mri.shape[0] < num_frames_per_clip:
return None
if mri.shape[0] > num_frames_per_clip:
mri = mri[:num_frames_per_clip, :, :]
data[:, :, :, 0] = mri
return data
def put_less_data_in_queue_2_class(self,num_less_label_data):
each_class_data_num=num_less_label_data//self.num_classes
count_class_1 = 0
count_class_2 = 0
self.records_in_queue = 0  # counter incremented below; it must be initialized first
while True:
# print 'enter for...'*n
index = random.randint(0, len(self.train_ids) - 1)
mri, label = get_data(self.train_ids[index], self.all_hdf5_data)
if count_class_1 < each_class_data_num and int(label[0])==1:
count_class_1=count_class_1+1
elif count_class_2 < each_class_data_num and int(label[1])==1:
count_class_2 = count_class_2 + 1
else:
self.queue.put((mri, label))
self.records_in_queue = self.records_in_queue + 1
def put_less_data_in_queue_3_class(self,num_less_label_data):
each_class_data_num = num_less_label_data // self.num_classes
count_class_1 = 0
count_class_2 = 0
count_class_3 = 0
self.records_in_queue = 0  # counter incremented below; it must be initialized first
while True:
# print 'enter for...'*n
index = random.randint(0, len(self.train_ids) - 1)
mri, label = get_data(self.train_ids[index], self.all_hdf5_data)
if count_class_1 < each_class_data_num and int(label[0]) == 1:
count_class_1 = count_class_1 + 1
elif count_class_2 < each_class_data_num and int(label[1]) == 1:
count_class_2 = count_class_2 + 1
elif count_class_3 < each_class_data_num and int(label[2]) == 1:
count_class_3 = count_class_3 + 1
else:
self.queue.put((mri, label))
self.records_in_queue = self.records_in_queue + 1
def extract_video_data(self):
''' Single thread that extracts video and label information from the dataset
'''
# Generate one random index and push the corresponding sample into the queue
while True:
# print 'enter for...'*n
index = random.randint(0, len(self.train_ids) - 1)
mri, label = get_data(self.train_ids[index], self.all_hdf5_data)
self.queue.put((mri, label))
def _start_data_thread(self):
print("Start thread: %s data preparation ..." % self.name)
self.worker = Thread(target=self.extract_video_data)
self.worker.setDaemon(True)
self.worker.start()
@property
def num_examples(self):
return len(self.train_ids)
def next_batch(self, batch_size):
''' Get the next batches of the dataset
Args
batch_size: video batch size
Returns
videos: numpy, shape
[batch_size, sequence_length, height, width, channels]
labels: numpy
[batch_size, num_classes]
'''
# print "begin a batch"
mris, labels = self.queue.get(batch_size)
mris = np.array(mris)
labels = np.array(labels)
# labels = self.labels_to_one_hot(labels, self.num_classes)
return mris, labels
class DataQueue():
def __init__(self, name, maximum_item, block=True):
"""
Args
name: str, data type name (train, validation or test)
maximum_item: integer, maximum item that this queue can store
block: boolean, block the put or get information if the queue is
full or empty
"""
self._name = name  # stored privately so the `name` property below does not recurse
self.block = block
self.maximum_item = maximum_item
self._queue = Queue(maximum_item)  # stored privately so the `queue` property below does not recurse
@property
def queue(self):
return self._queue
@property
def name(self):
return self._name
def put(self, data):
self.queue.put(data, self.block)
def get(self, batch_size):
'''
Args:
batch_size: integer, the number of the item you want to get from the queue
Returns:
videos: list, list of numpy data with shape
[sequence_length, height, width, channels]
labels: list, list of integer number
'''
videos = []
labels = []
for i in range(batch_size):
video, label = self.queue.get(self.block)
videos.append(video)
labels.append(label)
return videos, labels
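# Hedged usage sketch: a DataQueue decouples the producer thread from batch
# consumers - the producer put()s (video, label) pairs and next_batch-style
# consumers get() them in batches. The payloads below are illustrative.
def _example_data_queue():
    q = DataQueue("demo", maximum_item=4)
    q.put(("video-0", 0))
    q.put(("video-1", 1))
    videos, labels = q.get(batch_size=2)
    return videos, labels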
class DataProvider(DataProvider):
def __init__(self, config, dataset_train_unlabelled,dataset_train_labelled, dataset_test, all_hdf5_data_train, all_hdf5_data_test, dataset_val, all_hdf5_data_val,whichFoldData):
"""
Args:
num_classes: the number of the classes
validation_set: `bool`.
validation_split: `int` or None
float: chunk of `train set` will be marked as `validation set`.
None: if 'validation set' == True, `validation set` will be
copy of `test set`
normalization: `str` or None
None: no any normalization
divide_255: divide all pixels by 255
divide_256: divide all pixels by 256
sequence_length: `integer`, video clip length
crop_size: `tuple`, the size that you want to reshape the images, (width, height)
train: `boolean`, whether we need the training queue or not
test: `test`, whether we need the testing queue or not
queue_size: `integer`, data queue size , default is 300
"""
self._sequence_length = 109
self._crop_size = (91, 91)
img, label = get_data(dataset_train_unlabelled[0], all_hdf5_data_train)
print("step3")
config.h = img.shape[0]
config.w = img.shape[1]
config.c = img.shape[2]
# if len(img.shape) == 3:
# config.c = img.shape[2]
# else:
# config.c = 1
config.num_class = label.shape[0]
self._num_classes = label.shape[0]
self.num_less_label_data = config.num_less_label_data
if config.train:
self.train_unlabelled = Data('train_unlabelled', dataset_train_unlabelled,
'std', self._sequence_length,
self._crop_size, config.num_class, config.queue_size, all_hdf5_data_train)
self.train_labelled = Data('train_labelled', dataset_train_labelled,
'std', self._sequence_length,
self._crop_size, config.num_class, config.queue_size, all_hdf5_data_train)
if config.test:
self.validate = Data('validate', dataset_val,
'std', self._sequence_length,
self._crop_size, config.num_class, config.queue_size, all_hdf5_data_val)
self.test = Data('test', dataset_test,
'std', self._sequence_length,
self._crop_size, config.num_class, config.queue_size, all_hdf5_data_test)
def get_videos_labels_lines(self, path):
# Open the file according to the filename
lines = open(path, 'r')
lines = list(lines)
new_lines = [os.path.join(self._path, line) for line in lines]
return new_lines
@property
def data_shape(self):
return (self._sequence_length, self._crop_size[1], self._crop_size[0], 1)
@property
def n_classes(self):
return self._num_classes
|
runner.py
|
import argparse
import json
import logging
import os
import threading
import time
import traceback
import colors
import docker
import numpy
import psutil
from ann_benchmarks.algorithms.definitions import (Definition,
instantiate_algorithm)
from ann_benchmarks.datasets import get_dataset, DATASETS
from ann_benchmarks.distance import metrics, dataset_transform
from ann_benchmarks.results import store_results
def run_individual_query(algo, X_train, X_test, distance, count, run_count,
batch):
prepared_queries = \
(batch and hasattr(algo, "prepare_batch_query")) or \
((not batch) and hasattr(algo, "prepare_query"))
best_search_time = float('inf')
for i in range(run_count):
print('Run %d/%d...' % (i + 1, run_count))
# a bit dumb, but it can't be a scalar because of Python's scoping rules
n_items_processed = [0]
def single_query(v):
if prepared_queries:
algo.prepare_query(v, count)
start = time.time()
algo.run_prepared_query()
total = (time.time() - start)
candidates = algo.get_prepared_query_results()
else:
start = time.time()
candidates = algo.query(v, count)
total = (time.time() - start)
candidates = [(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in candidates]
n_items_processed[0] += 1
if n_items_processed[0] % 1000 == 0:
print('Processed %d/%d queries...' % (n_items_processed[0], len(X_test)))
if len(candidates) > count:
print('warning: algorithm %s returned %d results, but count'
' is only %d' % (algo, len(candidates), count))
return (total, candidates)
def batch_query(X):
if prepared_queries:
algo.prepare_batch_query(X, count)
start = time.time()
algo.run_batch_query()
total = (time.time() - start)
else:
start = time.time()
algo.batch_query(X, count)
total = (time.time() - start)
results = algo.get_batch_results()
candidates = [[(int(idx), float(metrics[distance]['distance'](v, X_train[idx]))) # noqa
for idx in single_results]
for v, single_results in zip(X, results)]
return [(total / float(len(X)), v) for v in candidates]
if batch:
results = batch_query(X_test)
else:
results = [single_query(x) for x in X_test]
total_time = sum(time for time, _ in results)
total_candidates = sum(len(candidates) for _, candidates in results)
search_time = total_time / len(X_test)
avg_candidates = total_candidates / len(X_test)
best_search_time = min(best_search_time, search_time)
verbose = hasattr(algo, "query_verbose")
attrs = {
"batch_mode": batch,
"best_search_time": best_search_time,
"candidates": avg_candidates,
"expect_extra": verbose,
"name": str(algo),
"run_count": run_count,
"distance": distance,
"count": int(count)
}
additional = algo.get_additional()
for k in additional:
attrs[k] = additional[k]
return (attrs, results)
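# Hedged illustration (not part of the benchmark pipeline): the (attrs, results)
# pair returned above carries per-query timings and candidate lists; a consumer
# could summarize them roughly like this.
def _example_summarize(attrs, results):
    total_time = sum(t for t, _ in results)
    return {
        "name": attrs["name"],
        "queries": len(results),
        "avg_query_time": total_time / max(len(results), 1),
    }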
def run(definition, dataset, count, run_count, batch):
algo = instantiate_algorithm(definition)
assert not definition.query_argument_groups \
or hasattr(algo, "set_query_arguments"), """\
error: query argument groups have been specified for %s.%s(%s), but the \
algorithm instantiated from it does not implement the set_query_arguments \
function""" % (definition.module, definition.constructor, definition.arguments)
D = get_dataset(dataset)
X_train = numpy.array(D['train'])
X_test = numpy.array(D['test'])
distance = D.attrs['distance']
print('got a train set of size (%d * %d)' % X_train.shape)
print('got %d queries' % len(X_test))
X_train = dataset_transform[distance](X_train)
X_test = dataset_transform[distance](X_test)
try:
prepared_queries = False
if hasattr(algo, "supports_prepared_queries"):
prepared_queries = algo.supports_prepared_queries()
t0 = time.time()
memory_usage_before = algo.get_memory_usage()
algo.fit(X_train)
build_time = time.time() - t0
index_size = algo.get_memory_usage() - memory_usage_before
print('Built index in', build_time)
print('Index size: ', index_size)
query_argument_groups = definition.query_argument_groups
# Make sure that algorithms with no query argument groups still get run
# once by providing them with a single, empty, harmless group
if not query_argument_groups:
query_argument_groups = [[]]
for pos, query_arguments in enumerate(query_argument_groups, 1):
print("Running query argument group %d of %d..." %
(pos, len(query_argument_groups)))
if query_arguments:
algo.set_query_arguments(*query_arguments)
descriptor, results = run_individual_query(
algo, X_train, X_test, distance, count, run_count, batch)
descriptor["build_time"] = build_time
descriptor["index_size"] = index_size
descriptor["algo"] = definition.algorithm
descriptor["dataset"] = dataset
store_results(dataset, count, definition,
query_arguments, descriptor, results, batch)
finally:
algo.done()
def run_from_cmdline():
parser = argparse.ArgumentParser('''
NOTICE: You probably want to run.py rather than this script.
''')
parser.add_argument(
'--dataset',
choices=DATASETS.keys(),
        help='Dataset to benchmark on.',
required=True)
parser.add_argument(
'--algorithm',
help='Name of algorithm for saving the results.',
required=True)
parser.add_argument(
'--module',
help='Python module containing algorithm. E.g. "ann_benchmarks.algorithms.annoy"',
required=True)
parser.add_argument(
'--constructor',
        help='Constructor to load from module. E.g. "Annoy"',
required=True)
parser.add_argument(
'--count',
help='K: Number of nearest neighbours for the algorithm to return.',
required=True,
type=int)
parser.add_argument(
'--runs',
        help='Number of times to run the algorithm. The fastest run time over all runs is used.',
required=True,
type=int)
parser.add_argument(
'--batch',
help='If flag included, algorithms will be run in batch mode, rather than "individual query" mode.',
action='store_true')
parser.add_argument(
'build',
help='JSON of arguments to pass to the constructor. E.g. ["angular", 100]'
)
parser.add_argument(
'queries',
help='JSON of arguments to pass to the queries. E.g. [100]',
nargs='*',
default=[])
args = parser.parse_args()
algo_args = json.loads(args.build)
print(algo_args)
query_args = [json.loads(q) for q in args.queries]
definition = Definition(
algorithm=args.algorithm,
docker_tag=None, # not needed
module=args.module,
constructor=args.constructor,
arguments=algo_args,
query_argument_groups=query_args,
disabled=False
)
run(definition, args.dataset, args.count, args.runs, args.batch)
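# Example invocation (hypothetical values, shown only to illustrate how the
# arguments above are parsed: the positional `build` argument is one JSON list
# for the constructor and every trailing positional is one query-argument group):
#
#   python runner.py --dataset <dataset-name> --algorithm annoy \
#       --module ann_benchmarks.algorithms.annoy --constructor Annoy \
#       --count 10 --runs 3 '["angular", 100]' '[100]' '[200]'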
def run_docker(definition, dataset, count, runs, timeout, batch, cpu_limit,
mem_limit=None):
cmd = ['--dataset', dataset,
'--algorithm', definition.algorithm,
'--module', definition.module,
'--constructor', definition.constructor,
'--runs', str(runs),
'--count', str(count)]
if batch:
cmd += ['--batch']
cmd.append(json.dumps(definition.arguments))
cmd += [json.dumps(qag) for qag in definition.query_argument_groups]
client = docker.from_env()
if mem_limit is None:
mem_limit = psutil.virtual_memory().available
container = client.containers.run(
definition.docker_tag,
cmd,
volumes={
os.path.abspath('ann_benchmarks'):
{'bind': '/home/app/ann_benchmarks', 'mode': 'ro'},
os.path.abspath('data'):
{'bind': '/home/app/data', 'mode': 'ro'},
os.path.abspath('results'):
{'bind': '/home/app/results', 'mode': 'rw'},
},
cpuset_cpus=cpu_limit,
mem_limit=mem_limit,
detach=True)
logger = logging.getLogger(f"annb.{container.short_id}")
logger.info('Created container %s: CPU limit %s, mem limit %s, timeout %d, command %s' % \
(container.short_id, cpu_limit, mem_limit, timeout, cmd))
def stream_logs():
for line in container.logs(stream=True):
logger.info(colors.color(line.decode().rstrip(), fg='blue'))
t = threading.Thread(target=stream_logs, daemon=True)
t.start()
try:
exit_code = container.wait(timeout=timeout)
        # Report an error if the container exited with a non-zero code.
        # (Newer docker SDK versions return a dict from wait(); this int
        # comparison assumes the older return value.)
if exit_code not in [0, None]:
logger.error(colors.color(container.logs().decode(), fg='red'))
logger.error('Child process for container %s raised exception %d' % (container.short_id, exit_code))
except:
logger.error('Container.wait for container %s failed with exception' % container.short_id)
traceback.print_exc()
finally:
container.remove(force=True)
|
server.py
|
#!/usr/bin/env python
import socket, threading, time
def handle(s):
print repr(s.recv(4096))
s.send('''
HTTP/1.1 101 Web Socket Protocol Handshake\r
Upgrade: WebSocket\r
Connection: Upgrade\r
WebSocket-Origin: http://bettingisbelieving.com:8888\r
WebSocket-Location: ws://bettingisbelieving.com:9876/\r
WebSocket-Protocol: sample
'''.strip() + '\r\n\r\n')
time.sleep(1)
s.send('\x00hello\xff')
time.sleep(1)
s.send('\x00world\xff')
s.close()
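# Illustrative client sketch (not part of the original script): handle() above
# speaks an old pre-RFC-6455 WebSocket draft, where each text message is framed
# as a 0x00 byte, the UTF-8 payload, then a 0xFF byte.  A raw-socket peer on the
# same machine could read those frames like this (port 9876 mirrors bind() below).
def example_client():
    c = socket.socket()
    c.connect(('127.0.0.1', 9876))
    c.send('GET / HTTP/1.1\r\nUpgrade: WebSocket\r\nConnection: Upgrade\r\n\r\n')
    print repr(c.recv(4096))   # handshake written by handle()
    print repr(c.recv(4096))   # '\x00hello\xff' arrives after about a second
    c.close()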
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', 9876))
s.listen(1)
while 1:
    t, _ = s.accept()
    threading.Thread(target=handle, args=(t,)).start()
|
web_control.py
|
#!/usr/bin/env python
# coding:utf-8
import os, sys
current_path = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
    python_path = os.path.abspath(os.path.join(current_path, os.pardir, 'python27', '1.0'))
    noarch_lib = os.path.abspath(os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
import re
import SocketServer, socket, ssl
import BaseHTTPServer
import errno
import urlparse
import threading
import urllib2
import time
import datetime
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
import yaml
import json
from instances import xlog
import module_init
import config
import autorun
import update_from_github
import simple_http_server
import jinja2_i18n_helper
NetWorkIOError = (socket.error, ssl.SSLError, OSError)
module_menus = {}
class Http_Handler(simple_http_server.HttpServerHandler):
deploy_proc = None
def load_module_menus(self):
global module_menus
module_menus = {}
#config.load()
modules = config.get(['modules'], None)
for module in modules:
values = modules[module]
if module != "launcher" and config.get(["modules", module, "auto_start"], 0) != 1: # skip php_proxy module
continue
#version = values["current_version"]
menu_path = os.path.join(root_path, module, "web_ui", "menu.yaml") # launcher & gae_proxy modules
if not os.path.isfile(menu_path):
continue
#module_menu = yaml.load(file(menu_path, 'r')) # non-i18n
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, module, 'lang'))
template_dir = os.path.abspath(os.path.join(root_path, module, 'web_ui'))
jinja2_i18n_helper.ihelper.refresh_env(locale_dir, template_dir)
stream = jinja2_i18n_helper.ihelper.render("menu.yaml", None)
module_menu = yaml.load(stream)
module_menus[module] = module_menu
module_menus = sorted(module_menus.iteritems(), key=lambda (k,v): (v['menu_sort_id']))
#for k,v in self.module_menus:
# logging.debug("m:%s id:%d", k, v['menu_sort_id'])
def do_POST(self):
refer = self.headers.getheader('Referer')
if refer:
refer_loc = urlparse.urlparse(refer).netloc
host = self.headers.getheader('host')
if refer_loc != host:
xlog.warn("web control ref:%s host:%s", refer_loc, host)
return
#url_path = urlparse.urlparse(self.path).path
url_path_list = self.path.split('/')
if len(url_path_list) >= 3 and url_path_list[1] == "module":
module = url_path_list[2]
if len(url_path_list) >= 4 and url_path_list[3] == "control":
if module not in module_init.proc_handler:
xlog.warn("request %s no module in path", self.path)
self.send_not_found()
return
path = '/' + '/'.join(url_path_list[4:])
                controller = module_init.proc_handler[module]["imp"].local.web_control.ControlHandler(self.client_address, self.headers, self.command, path, self.rfile, self.wfile)
                controller.do_POST()
return
def do_GET(self):
refer = self.headers.getheader('Referer')
if refer:
refer_loc = urlparse.urlparse(refer).netloc
host = self.headers.getheader('host')
if refer_loc != host:
xlog.warn("web control ref:%s host:%s", refer_loc, host)
return
        # reject any path containing '..' to prevent directory traversal / file disclosure
        if re.search(r'(\.{2})', self.path) is not None:
self.wfile.write(b'HTTP/1.1 404\r\n\r\n')
            xlog.warn('%s %s %s hacking attempt', self.address_string(), self.command, self.path)
return
url_path = urlparse.urlparse(self.path).path
if url_path == '/':
return self.req_index_handler()
url_path_list = self.path.split('/')
if len(url_path_list) >= 3 and url_path_list[1] == "module":
module = url_path_list[2]
if len(url_path_list) >= 4 and url_path_list[3] == "control":
if module not in module_init.proc_handler:
xlog.warn("request %s no module in path", url_path)
self.send_not_found()
return
if "imp" not in module_init.proc_handler[module]:
xlog.warn("request module:%s start fail", module)
self.send_not_found()
return
path = '/' + '/'.join(url_path_list[4:])
                controller = module_init.proc_handler[module]["imp"].local.web_control.ControlHandler(self.client_address, self.headers, self.command, path, self.rfile, self.wfile)
                controller.do_GET()
return
else:
relate_path = '/'.join(url_path_list[3:])
file_path = os.path.join(root_path, module, "web_ui", relate_path)
if not os.path.isfile(file_path):
return self.send_not_found()
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, module, 'lang'))
template_dir = os.path.abspath(os.path.join(root_path, module, 'web_ui'))
jinja2_i18n_helper.ihelper.refresh_env(locale_dir, template_dir)
content = jinja2_i18n_helper.ihelper.render(relate_path, None)
return self.send_response('text/html', content)
else:
file_path = os.path.join(current_path, 'web_ui' + url_path)
            xlog.debug('launcher web_control %s %s %s', self.address_string(), self.command, self.path)
if os.path.isfile(file_path):
if file_path.endswith('.js'):
mimetype = 'application/javascript'
elif file_path.endswith('.css'):
mimetype = 'text/css'
elif file_path.endswith('.html'):
mimetype = 'text/html'
elif file_path.endswith('.jpg'):
mimetype = 'image/jpeg'
elif file_path.endswith('.png'):
mimetype = 'image/png'
else:
mimetype = 'text/plain'
self.send_file(file_path, mimetype)
elif url_path == '/config':
self.req_config_handler()
elif url_path == '/download':
self.req_download_handler()
elif url_path == '/init_module':
self.req_init_module_handler()
elif url_path == '/quit':
self.send_response('text/html', '{"status":"success"}')
module_init.stop_all()
os._exit(0)
elif url_path == '/restart':
self.send_response('text/html', '{"status":"success"}')
update_from_github.restart_xxnet()
else:
self.send_not_found()
xlog.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)
def req_index_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
try:
target_module = reqs['module'][0]
target_menu = reqs['menu'][0]
except:
if config.get(['modules', 'gae_proxy', 'auto_start'], 0) == 1:
target_module = 'gae_proxy'
target_menu = 'status'
else:
target_module = 'launcher'
target_menu = 'about'
if len(module_menus) == 0:
self.load_module_menus()
# Old code without i18n
#index_path = os.path.join(current_path, 'web_ui', "index.html")
#with open(index_path, "r") as f:
# index_content = f.read()
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(current_path, 'lang'))
template_dir = os.path.abspath(os.path.join(current_path, 'web_ui'))
jinja2_i18n_helper.ihelper.refresh_env(locale_dir, template_dir)
index_content = jinja2_i18n_helper.ihelper.render("index.html", None)
menu_content = ''
for module,v in module_menus:
#logging.debug("m:%s id:%d", module, v['menu_sort_id'])
title = v["module_title"]
menu_content += '<li class="nav-header">%s</li>\n' % title
for sub_id in v['sub_menus']:
sub_title = v['sub_menus'][sub_id]['title']
sub_url = v['sub_menus'][sub_id]['url']
if target_module == title and target_menu == sub_url:
active = 'class="active"'
else:
active = ''
menu_content += '<li %s><a href="/?module=%s&menu=%s">%s</a></li>\n' % (active, module, sub_url, sub_title)
right_content_file = os.path.join(root_path, target_module, "web_ui", target_menu + ".html")
if os.path.isfile(right_content_file):
# Old code without i18n
#with open(right_content_file, "r") as f:
# right_content = f.read()
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, target_module, 'lang'))
template_dir = os.path.abspath(os.path.join(root_path, target_module, 'web_ui'))
jinja2_i18n_helper.ihelper.refresh_env(locale_dir, template_dir)
right_content = jinja2_i18n_helper.ihelper.render(target_menu + ".html", None)
else:
right_content = ""
data = (index_content.decode('utf-8') % (menu_content, right_content.decode('utf-8') )).encode('utf-8')
self.send_response('text/html', data)
def req_config_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
current_version = update_from_github.current_version()
if reqs['cmd'] == ['get_config']:
config.load()
check_update = config.get(["update", "check_update"], 1)
if check_update == 0:
check_update = "dont-check"
elif check_update == 1:
check_update = "stable"
data = '{ "check_update": "%s", "popup_webui": %d, "allow_remote_connect": %d, "show_systray": %d, "auto_start": %d, "php_enable": %d, "gae_proxy_enable": %d }' %\
(check_update
, config.get(["modules", "launcher", "popup_webui"], 1)
, config.get(["modules", "launcher", "allow_remote_connect"], 0)
, config.get(["modules", "launcher", "show_systray"], 1)
, config.get(["modules", "launcher", "auto_start"], 0)
, config.get(["modules", "php_proxy", "auto_start"], 0)
, config.get(["modules", "gae_proxy", "auto_start"], 0))
elif reqs['cmd'] == ['set_config']:
if 'check_update' in reqs:
check_update = reqs['check_update'][0]
if check_update not in ["dont-check", "stable", "test"]:
data = '{"res":"fail, check_update:%s"}' % check_update
else:
config.set(["update", "check_update"], check_update)
config.save()
data = '{"res":"success"}'
elif 'popup_webui' in reqs:
popup_webui = int(reqs['popup_webui'][0])
if popup_webui != 0 and popup_webui != 1:
data = '{"res":"fail, popup_webui:%s"}' % popup_webui
else:
config.set(["modules", "launcher", "popup_webui"], popup_webui)
config.save()
data = '{"res":"success"}'
elif 'allow_remote_connect' in reqs:
allow_remote_connect = int(reqs['allow_remote_connect'][0])
if allow_remote_connect != 0 and allow_remote_connect != 1:
data = '{"res":"fail, allow_remote_connect:%s"}' % allow_remote_connect
else:
config.set(["modules", "launcher", "allow_remote_connect"], allow_remote_connect)
config.save()
data = '{"res":"success"}'
xlog.debug("restart web control.")
stop()
time.sleep(1)
start()
xlog.debug("launcher web control restarted.")
elif 'show_systray' in reqs:
show_systray = int(reqs['show_systray'][0])
if show_systray != 0 and show_systray != 1:
data = '{"res":"fail, show_systray:%s"}' % show_systray
else:
config.set(["modules", "launcher", "show_systray"], show_systray)
config.save()
data = '{"res":"success"}'
elif 'auto_start' in reqs:
auto_start = int(reqs['auto_start'][0])
if auto_start != 0 and auto_start != 1:
data = '{"res":"fail, auto_start:%s"}' % auto_start
else:
if auto_start:
autorun.enable()
else:
autorun.disable()
config.set(["modules", "launcher", "auto_start"], auto_start)
config.save()
data = '{"res":"success"}'
            elif 'gae_proxy_enable' in reqs:
gae_proxy_enable = int(reqs['gae_proxy_enable'][0])
if gae_proxy_enable != 0 and gae_proxy_enable != 1:
data = '{"res":"fail, gae_proxy_enable:%s"}' % gae_proxy_enable
else:
config.set(["modules", "gae_proxy", "auto_start"], gae_proxy_enable)
config.save()
if gae_proxy_enable:
module_init.start("gae_proxy")
else:
module_init.stop("gae_proxy")
self.load_module_menus()
data = '{"res":"success"}'
            elif 'php_enable' in reqs:
php_enable = int(reqs['php_enable'][0])
if php_enable != 0 and php_enable != 1:
data = '{"res":"fail, php_enable:%s"}' % php_enable
else:
config.set(["modules", "php_proxy", "auto_start"], php_enable)
config.save()
if php_enable:
module_init.start("php_proxy")
else:
module_init.stop("php_proxy")
self.load_module_menus()
data = '{"res":"success"}'
else:
data = '{"res":"fail"}'
elif reqs['cmd'] == ['get_new_version']:
versions = update_from_github.get_github_versions()
data = '{"res":"success", "test_version":"%s", "stable_version":"%s", "current_version":"%s"}' % (versions[0][1], versions[1][1], current_version)
xlog.info("%s", data)
elif reqs['cmd'] == ['update_version']:
version = reqs['version'][0]
try:
update_from_github.update_version(version)
data = '{"res":"success"}'
except Exception as e:
xlog.info("update_test_version fail:%r", e)
data = '{"res":"fail", "error":"%s"}' % e
self.send_response('text/html', data)
def req_download_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
if reqs['cmd'] == ['get_progress']:
data = json.dumps(update_from_github.download_progress)
self.send_response('text/html', data)
def req_init_module_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
try:
module = reqs['module'][0]
config.load()
if reqs['cmd'] == ['start']:
result = module_init.start(module)
data = '{ "module": "%s", "cmd": "start", "result": "%s" }' % (module, result)
elif reqs['cmd'] == ['stop']:
result = module_init.stop(module)
data = '{ "module": "%s", "cmd": "stop", "result": "%s" }' % (module, result)
elif reqs['cmd'] == ['restart']:
result_stop = module_init.stop(module)
result_start = module_init.start(module)
data = '{ "module": "%s", "cmd": "restart", "stop_result": "%s", "start_result": "%s" }' % (module, result_stop, result_start)
except Exception as e:
xlog.exception("init_module except:%s", e)
self.send_response("text/html", data)
process = 0
server = 0
def start():
global process, server
    # the bind address should come from config.yaml
allow_remote = config.get(["modules", "launcher", "allow_remote_connect"], 0)
host_port = config.get(["modules", "launcher", "control_port"], 8085)
if allow_remote:
host_addr = "0.0.0.0"
else:
host_addr = "127.0.0.1"
xlog.info("begin to start web control")
server = simple_http_server.HTTPServer((host_addr, host_port), Http_Handler)
process = threading.Thread(target=server.serve_forever)
process.setDaemon(True)
process.start()
xlog.info("launcher web control started.")
def stop():
global process, server
if process == 0:
return
xlog.info("begin to exit web control")
server.shutdown()
server.server_close()
process.join()
xlog.info("launcher web control exited.")
process = 0
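def _example_probe():
    # Illustrative sketch, not part of the original module: starts the control
    # server, fetches the index page through http_request() below, then stops it.
    # Port 8085 mirrors the default read from config in start() and is only an
    # assumption for this example.
    start()
    req = http_request("http://127.0.0.1:8085/")
    if req:
        xlog.info("control page served %d bytes", len(req.read()))
    stop()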
def http_request(url, method="GET"):
proxy_handler = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_handler)
try:
req = opener.open(url, timeout=30)
return req
except Exception as e:
#logging.exception("web_control http_request:%s fail:%s", url, e)
return False
def confirm_xxnet_exit():
"""suppose xxnet is running, try to close it
"""
is_xxnet_exit = False
xlog.debug("start confirm_xxnet_exit")
for i in range(30):
# gae_proxy(default port:8087)
if http_request("http://127.0.0.1:8087/quit") == False:
xlog.debug("good, xxnet:8087 cleared!")
is_xxnet_exit = True
break
else:
xlog.debug("<%d>: try to terminate xxnet:8087" % i)
time.sleep(1)
for i in range(30):
# web_control(default port:8085)
host_port = config.get(["modules", "launcher", "control_port"], 8085)
req_url = "http://127.0.0.1:{port}/quit".format(port=host_port)
if http_request(req_url) == False:
xlog.debug("good, xxnet:%s clear!" % host_port)
is_xxnet_exit = True
break
else:
xlog.debug("<%d>: try to terminate xxnet:%s" % (i, host_port))
time.sleep(1)
xlog.debug("finished confirm_xxnet_exit")
return is_xxnet_exit
def confirm_module_ready(port):
if port == 0:
xlog.error("confirm_module_ready with port 0")
time.sleep(1)
return False
for i in range(200):
req = http_request("http://127.0.0.1:%d/is_ready" % port)
if req == False:
time.sleep(1)
continue
content = req.read(1024)
req.close()
#logging.debug("cert_import_ready return:%s", content)
if content == "True":
return True
else:
time.sleep(1)
return False
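# Note (added for clarity, based only on the polling loop above):
# confirm_module_ready() expects the module's own web UI to answer
# GET /is_ready with the literal body "True" once startup has finished;
# anything else keeps the poll going for up to ~200 seconds.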
if __name__ == "__main__":
#confirm_xxnet_exit()
http_request("http://getbootstrap.com/dist/js/bootstrap.min.js")
|
test_auth.py
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
import time
import threading
from azure.identity import EnvironmentCredential
from azure.eventhub import EventData, EventHubProducerClient, EventHubConsumerClient, EventHubSharedKeyCredential
from azure.eventhub._client_base import EventHubSASTokenCredential
from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential
@pytest.mark.liveTest
def test_client_secret_credential(live_eventhub):
credential = EnvironmentCredential()
producer_client = EventHubProducerClient(fully_qualified_namespace=live_eventhub['hostname'],
eventhub_name=live_eventhub['event_hub'],
credential=credential,
user_agent='customized information')
consumer_client = EventHubConsumerClient(fully_qualified_namespace=live_eventhub['hostname'],
eventhub_name=live_eventhub['event_hub'],
consumer_group='$default',
credential=credential,
user_agent='customized information')
with producer_client:
batch = producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
producer_client.send_batch(batch)
def on_event(partition_context, event):
on_event.called = True
on_event.partition_id = partition_context.partition_id
on_event.event = event
on_event.called = False
with consumer_client:
worker = threading.Thread(target=consumer_client.receive, args=(on_event,),
kwargs={
"partition_id": '0',
"starting_position": '-1'
})
worker.start()
time.sleep(13)
worker.join()
assert on_event.called is True
assert on_event.partition_id == "0"
assert list(on_event.event.body)[0] == 'A single message'.encode('utf-8')
@pytest.mark.liveTest
def test_client_sas_credential(live_eventhub):
# This should "just work" to validate known-good.
hostname = live_eventhub['hostname']
    producer_client = EventHubProducerClient.from_connection_string(live_eventhub['connection_str'], eventhub_name=live_eventhub['event_hub'])
with producer_client:
batch = producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
producer_client.send_batch(batch)
# This should also work, but now using SAS tokens.
credential = EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
auth_uri = "sb://{}/{}".format(hostname, live_eventhub['event_hub'])
token = credential.get_token(auth_uri).token
producer_client = EventHubProducerClient(fully_qualified_namespace=hostname,
eventhub_name=live_eventhub['event_hub'],
credential=EventHubSASTokenCredential(token, time.time() + 3000))
with producer_client:
batch = producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
producer_client.send_batch(batch)
# Finally let's do it with SAS token + conn str
token_conn_str = "Endpoint=sb://{}/;SharedAccessSignature={};".format(hostname, token.decode())
conn_str_producer_client = EventHubProducerClient.from_connection_string(token_conn_str,
eventhub_name=live_eventhub['event_hub'])
with conn_str_producer_client:
batch = conn_str_producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
conn_str_producer_client.send_batch(batch)
@pytest.mark.liveTest
def test_client_azure_sas_credential(live_eventhub):
# This should "just work" to validate known-good.
hostname = live_eventhub['hostname']
    producer_client = EventHubProducerClient.from_connection_string(live_eventhub['connection_str'], eventhub_name=live_eventhub['event_hub'])
with producer_client:
batch = producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
producer_client.send_batch(batch)
# This should also work, but now using SAS tokens.
credential = EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
auth_uri = "sb://{}/{}".format(hostname, live_eventhub['event_hub'])
token = credential.get_token(auth_uri).token.decode()
producer_client = EventHubProducerClient(fully_qualified_namespace=hostname,
eventhub_name=live_eventhub['event_hub'],
credential=AzureSasCredential(token))
with producer_client:
batch = producer_client.create_batch(partition_id='0')
batch.add(EventData(body='A single message'))
producer_client.send_batch(batch)
@pytest.mark.liveTest
def test_client_azure_named_key_credential(live_eventhub):
credential = AzureNamedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
consumer_client = EventHubConsumerClient(fully_qualified_namespace=live_eventhub['hostname'],
eventhub_name=live_eventhub['event_hub'],
consumer_group='$default',
credential=credential,
user_agent='customized information')
assert consumer_client.get_eventhub_properties() is not None
credential.update("foo", "bar")
with pytest.raises(Exception):
consumer_client.get_eventhub_properties()
credential.update(live_eventhub['key_name'], live_eventhub['access_key'])
assert consumer_client.get_eventhub_properties() is not None
|